{"signature": "def debug(user, message):", "body": "message_user(user, message, constants.DEBUG)", "docstring": "Adds a message with the ``DEBUG`` level.\n\n:param user: User instance\n:param message: Message to show", "id": "f4:m0"} {"signature": "def success(user, message):", "body": "message_user(user, message, constants.SUCCESS)", "docstring": "Adds a message with the ``SUCCESS`` level.\n\n:param user: User instance\n:param message: Message to show", "id": "f4:m2"} {"signature": "def process_response(self, request, response):", "body": "if hasattr(request, \"\") and hasattr(request, \"\") and request.user.is_authenticated():msgs = get_messages(request.user)if msgs:for msg, level in msgs:messages.add_message(request, level, msg)return response", "docstring": "Check for messages for this user and, if it exists,\ncall the messages API with it", "id": "f5:c0:m0"} {"signature": "def table(name, auth=None, eager=True):", "body": "auth = auth or []dynamodb = boto.connect_dynamodb(*auth)table = dynamodb.get_table(name)return Table(table=table, eager=eager)", "docstring": "Returns a given table for the given user.", "id": "f8:m0"} {"signature": "def api_request(methods=None, require_token=True):", "body": "def decorator(view_func):@wraps(view_func, assigned=available_attrs(view_func))def _wrapped_view(request, *args, **kwargs):ApiToken = apps.get_model('', '')m = methods if methods is not None else DEFAULT_API_METHODSif request.method not in m:response = ApiResponse(False, '', status=)response[''] = ''.join(methods)return responsetry:data = json.loads(request.body.decode('')) if request.body else {}if require_token:token_string = request.GET[''] if request.method == '' else data['']try:token = ApiToken.objects.get(token=token_string)token.save() data[''] = tokenexcept ApiToken.DoesNotExist:logger.exception(''.format(token_string))return ApiResponse(False, '', status=)return ApiResponse(data=view_func(request, data=data, *args, **kwargs))except Exception as e:if e.__class__.__name__ == '':logger.exception('')return ApiResponse(False, ''.format(e), status=)else:logger.exception('')return ApiResponse(False, ''.format(e), status=)return _wrapped_viewreturn decorator", "docstring": "View decorator that handles JSON based API requests and responses consistently.\n:param methods: A list of allowed methods\n:param require_token: Whether API token is checked automatically or not", "id": "f21:m0"} {"signature": "def get_tweets(user, pages=):", "body": "url = f''headers = {'': '','': f'','': '','': '','': '','': ''}def gen_tweets(pages):r = session.get(url, headers=headers)while pages > :try:html = HTML(html=r.json()[''],url='', default_encoding='')except KeyError:raise ValueError(f'')comma = \"\"dot = \"\"tweets = []for tweet in html.find(''):try:text = tweet.find('')[].full_textexcept IndexError: continuetweet_id = tweet.find('')[].attrs['']time = datetime.fromtimestamp(int(tweet.find('')[].attrs['']) / )interactions = [x.textfor x in tweet.find('')]replies = int(interactions[].split('')[].replace(comma, '').replace(dot, '')or interactions[])retweets = int(interactions[].split('')[].replace(comma, '').replace(dot, '')or interactions[]or interactions[])likes = int(interactions[].split('')[].replace(comma, '').replace(dot, '')or interactions[]or interactions[])hashtags = [hashtag_node.full_textfor hashtag_node in tweet.find('')]urls = [url_node.attrs['']for url_node in tweet.find('')]photos = [photo_node.attrs['']for photo_node in tweet.find('')]videos = []video_nodes = tweet.find(\"\")for node in 
video_nodes:styles = node.attrs[''].split()for style in styles:if style.startswith(''):tmp = style.split('')[-]video_id = tmp[:tmp.index('')]videos.append({'': video_id})tweets.append({'': tweet_id,'': time,'': text,'': replies,'': retweets,'': likes,'': {'': hashtags, '': urls,'': photos, '': videos}})last_tweet = html.find('')[-].attrs['']for tweet in tweets:if tweet:tweet[''] = re.sub('', '', tweet[''], )yield tweetr = session.get(url, params={'': last_tweet}, headers=headers)pages += -yield from gen_tweets(pages)", "docstring": "Gets tweets for a given user, via the Twitter frontend API.", "id": "f32:m0"} {"signature": "def add_deformation(chn_names, data):", "body": "if \"\" not in chn_names:for ii, ch in enumerate(chn_names):if ch == \"\":chn_names.append(\"\")data.append(-data[ii])return chn_names, data", "docstring": "From circularity, compute the deformation\n\n This method is useful for RT-DC data sets that contain\n the circularity but not the deformation.", "id": "f44:m1"} {"signature": "def get_leaves(self):", "body": "return [n for n in self.walk() if n.is_leaf]", "docstring": "Get all the leaf nodes of the subtree descending from this node.\n\n:return: List of Nodes with no descendants.", "id": "f55:c0:m14"} {"signature": "def get_node(self, label):", "body": "for n in self.walk():if n.name == label:return n", "docstring": "Gets the specified node by name.\n\n:return: Node or None if name does not exist in tree", "id": "f55:c0:m15"} {"signature": "def get_leaf_names(self):", "body": "return [n.name for n in self.get_leaves()]", "docstring": "Get the names of all the leaf nodes of the subtree descending from\nthis node.\n\n:return: List of names of Nodes with no descendants.", "id": "f55:c0:m16"} {"signature": "@classmethoddef create(cls, name=None, length=None, descendants=None, **kw):", "body": "node = cls(name=name, length=length, **kw)for descendant in descendants or []:node.add_descendant(descendant)return node", "docstring": "Create a new `Node` object.\n\n:param name: Node label.\n:param length: Branch length from the new node to its parent.\n:param descendants: list of descendants or `None`.\n:param kw: Additonal keyword arguments are passed through to `Node.__init__`.\n:return: `Node` instance.", "id": "f55:c0:m4"} {"signature": "@propertydef newick(self):", "body": "label = self.name or ''if self._length:label += '' + self._lengthdescendants = ''.join([n.newick for n in self.descendants])if descendants:descendants = '' + descendants + ''return descendants + label", "docstring": "The representation of the Node in Newick format.", "id": "f55:c0:m6"} {"signature": "def loads(s, strip_comments=False, **kw):", "body": "kw[''] = strip_commentsreturn [parse_node(ss.strip(), **kw) for ss in s.split('') if ss.strip()]", "docstring": "Load a list of trees from a Newick formatted string.\n\n:param s: Newick formatted string.\n:param strip_comments: Flag signaling whether to strip comments enclosed in square \\\nbrackets.\n:param kw: Keyword arguments are passed through to `Node.create`.\n:return: List of Node objects.", "id": "f55:m2"} {"signature": "def visit(self, visitor, predicate=None, **kw):", "body": "predicate = predicate or boolfor n in self.walk(**kw):if predicate(n):visitor(n)", "docstring": "Apply a function to matching nodes in the (sub)tree rooted at self.\n\n:param visitor: A callable accepting a Node object as single argument..\n:param predicate: A callable accepting a Node object as single argument and \\\nreturning a boolean signaling whether Node matches; if 
`None` all nodes match.\n:param kw: Addtional keyword arguments are passed through to self.walk.", "id": "f55:c0:m12"} {"signature": "def ascii_art(self, strict=False, show_internal=True):", "body": "cmap = {'': '','': '','': '','': '','': '','': '','': '',}def normalize(line):m = re.compile('')line = m.sub(lambda m: m.group('')[:], line)line = re.sub('', '', line) line = re.sub('', '', line) line = re.sub('', '', line) if strict:for u, a in cmap.items():line = line.replace(u, a)return linereturn ''.join(normalize(l) for l in self._ascii_art(show_internal=show_internal)[]if set(l) != {'', ''})", "docstring": "Return a unicode string representing a tree in ASCII art fashion.\n\n:param strict: Use ASCII characters strictly (for the tree symbols).\n:param show_internal: Show labels of internal nodes.\n:return: unicode string\n\n>>> node = loads('((A,B)C,((D,E)F,G,H)I)J;')[0]\n>>> print(node.ascii_art(show_internal=False, strict=True))\n /-A\n /---|\n | \\-B\n----| /-D\n | /---|\n | | \\-E\n \\---|\n |-G\n \\-H", "id": "f55:c0:m8"} {"signature": "def close(self):", "body": "pass", "docstring": "Close the socket.", "id": "f58:c0:m4"} {"signature": "def settimeout(self, timeout):", "body": "pass", "docstring": "Set a timeout.", "id": "f58:c0:m3"} {"signature": "def recv(self, buffer_size):", "body": "return self.msg[:buffer_size]", "docstring": "Receive a message.", "id": "f58:c0:m1"} {"signature": "async def read(self, buffer_size):", "body": "return self.msg[:buffer_size]", "docstring": "Read a message.", "id": "f60:c0:m1"} {"signature": "def write(self, msg):", "body": "self.msg = msg", "docstring": "Write a message.", "id": "f60:c0:m0"} {"signature": "async def wait_for(self, cmd, value=None, timeout=):", "body": "try:async with async_timeout(timeout * ):while True:msgs = await self.receive()msg = check_messages(msgs, cmd, value=value)if msg:return msgexcept asyncio.TimeoutError:return OrderedDict()", "docstring": "Hang until command is received.\n\n If value is supplied, it will hang until ``cmd:value`` is received.\n\n Parameters\n ----------\n cmd : string\n Command to wait for in bytestring from microscope CAM interface. If\n ``value`` is falsey, value of received command does not matter.\n value : string\n Wait until ``cmd:value`` is received.\n timeout : int\n Minutes to wait for command. If timeout is reached, an empty\n OrderedDict will be returned.\n\n Returns\n -------\n collections.OrderedDict\n Last received messsage or empty message if timeout is reached.", "id": "f63:c0:m4"} {"signature": "async def connect(self):", "body": "self.reader, self.writer = await asyncio.open_connection(self.host, self.port, loop=self.loop)self.welcome_msg = await self.reader.read(self.buffer_size)", "docstring": "Connect to LASAF through a CAM-socket.", "id": "f63:c0:m1"} {"signature": "async def send(self, commands):", "body": "msg = self._prepare_send(commands)self.writer.write(msg)await self.writer.drain()", "docstring": "Send commands to LASAF through CAM-socket.\n\n Parameters\n ----------\n commands : list of tuples or bytes string\n Commands as a list of tuples or a bytes string. 
cam.prefix is\n allways prepended before sending.\n\n Returns\n -------\n int\n Bytes sent.\n\n Example\n -------\n ::\n\n >>> # send list of tuples\n >>> await cam.send([('cmd', 'enableall'), ('value', 'true')])\n\n >>> # send bytes string\n >>> await cam.send(b'/cmd:enableall /value:true')", "id": "f63:c0:m2"} {"signature": "def close(self):", "body": "if self.writer.can_write_eof():self.writer.write_eof()self.writer.close()", "docstring": "Close stream.", "id": "f63:c0:m5"} {"signature": "def logger(function):", "body": "@functools.wraps(function)def wrapper(*args, **kwargs):\"\"\"\"\"\"sep = kwargs.get('', '')end = kwargs.get('', '') out = sep.join([repr(x) for x in args])out = out + end_LOGGER.debug(out)return function(*args, **kwargs)return wrapper", "docstring": "Decorate passed in function and log message to module logger.", "id": "f64:m0"} {"signature": "def autofocus_scan(self):", "body": "cmd = [('', '')]self.send(cmd)return self.wait_for(*cmd[])", "docstring": "Start the autofocus job.", "id": "f64:c1:m9"} {"signature": "def close(self):", "body": "self.socket.close()", "docstring": "Close the socket.", "id": "f64:c1:m6"} {"signature": "def give_another_quote(q):", "body": "for qc in QUOTES:if qc != q:return qcelse:raise ValueError(u''.format(q))", "docstring": "When you pass a quote character, returns you an another one if possible", "id": "f75:m0"} {"signature": "def find_by(self, **params):", "body": "return self.filter(Q.from_dict(params))", "docstring": "Searches in ManageIQ using the ``filter[]`` get parameter.\n\n This method only supports logical AND so all key/value pairs are considered as equality\n comparision and all are logically anded.", "id": "f76:c4:m8"} {"signature": "def _get_entity_from_href(self, result):", "body": "href_result = result['']if self.collection._href.startswith(href_result):return Entity(self.collection, result, incomplete=True)href_match = re.match(r\"\", href_result)if not href_match:raise ValueError(\"\".format(href_result))collection_name = href_match.group()entry_point = href_match.group()new_collection = Collection(self.collection.api,\"\".format(entry_point, collection_name),collection_name)return Entity(new_collection, result, incomplete=True)", "docstring": "Returns entity in correct collection.\n\n If the \"href\" value in result doesn't match the current collection,\n try to find the collection that the \"href\" refers to.", "id": "f76:c7:m4"} {"signature": "def query_string(self, **params):", "body": "return SearchResult(self, self._api.get(self._href, **params))", "docstring": "Specify query string to use with the collection.\n\n Returns: :py:class:`SearchResult`", "id": "f76:c4:m5"} {"signature": "@main.command('')@click.argument('', cls=SectionArgument)@click.argument('',required=False)@click.option('','',is_flag=True,help='')def set_variable(section, value, create):", "body": "if not value:value = sectionsection = Nonetry:logger.debug('')settings = config.Settings(section=section)conf = s3conf.S3Conf(settings=settings)env_vars = conf.get_envfile()env_vars.set(value, create=create)except exceptions.EnvfilePathNotDefinedError:raise exceptions.EnvfilePathNotDefinedUsageError()", "docstring": "Set value of a variable in an environment file for the given section.\nIf the variable is already defined, its value is replaced, otherwise, it is added to the end of the file.\nThe value is given as \"ENV_VAR_NAME=env_var_value\", e.g.:\n\ns3conf set test ENV_VAR_NAME=env_var_value", "id": "f82:m8"} {"signature": 
"@main.command('')@click.argument('',required=False)@click.option('','',is_flag=True,help='''')@click.option('',is_flag=True,help='''')@click.option('',default='',show_default=True,help='')@click.option('','',is_flag=True,help='')@click.option('','',is_flag=True)@click.option('','',is_flag=True,help='')def env(section, map_files, phusion, phusion_path, quiet, edit, create):", "body": "try:logger.debug('')settings = config.Settings(section=section)storage = STORAGES[''](settings=settings)conf = s3conf.S3Conf(storage=storage, settings=settings)if edit:conf.edit(create=create)else:env_vars = conf.get_envfile().as_dict()if env_vars.get('') and map_files:conf.download_mapping(env_vars.get(''))if not quiet:for var_name, var_value in sorted(env_vars.items(), key=lambda x: x[]):click.echo(''.format(var_name, var_value))if phusion:s3conf.phusion_dump(env_vars, phusion_path)except exceptions.EnvfilePathNotDefinedError:raise exceptions.EnvfilePathNotDefinedUsageError()except exceptions.FileDoesNotExist as e:raise UsageError(''.format(str(e)))", "docstring": "Reads the file defined by the S3CONF variable and output its contents to stdout. Logs are printed to stderr.\nSee options for added functionality: editing file, mapping files, dumping in the phusion-baseimage format, etc.", "id": "f82:m1"} {"signature": "@click.group(invoke_without_command=True)@click.version_option(version=__version__)@click.option('', '', is_flag=True)@click.option('','',is_flag=True,help='')@click.pass_context@click_log.simple_verbosity_option('')def main(ctx, edit, create):", "body": "try:click_log.basic_config('')logger.debug('')if edit:if ctx.invoked_subcommand is None:logger.debug('', config.LOCAL_CONFIG_FILE)config.ConfigFileResolver(config.LOCAL_CONFIG_FILE).edit(create=create)returnelse:raise UsageError('')if ctx.invoked_subcommand is None:click.echo(main.get_help(ctx))except exceptions.FileDoesNotExist as e:raise UsageError(''.format(str(e)))", "docstring": "Simple command line tool to help manage environment variables stored in a S3-like system. Facilitates editing text\nfiles remotely stored, as well as downloading and uploading files.", "id": "f82:m0"} {"signature": "@main.command('')@click.argument('')@click.argument('')def init(section, remote_file):", "body": "if not remote_file.startswith(''):raise UsageError('''')logger.debug('')config_file_path = os.path.join(os.getcwd(), '', '')config_file = config.ConfigFileResolver(config_file_path, section=section)config_file.set('', remote_file)gitignore_file_path = os.path.join(os.getcwd(), '', '')config_file.save()open(gitignore_file_path, '').write('')", "docstring": "Creates the .s3conf config folder and .s3conf/config config file\nwith the provided section name and configuration file. It is a very\nbasic config file. Manually edit it in order to add credentials. 
E.g.:\n\ns3conf init development s3://my-project/development.env", "id": "f82:m10"} {"signature": "@register.simple_tagdef djfrontend_twbs_theme_css(version=None):", "body": "if version is None:if not getattr(settings, '', False):version = getattr(settings, '', DJFRONTEND_TWBS_VERSION_DEFAULT)else:version = getattr(settings, '', DJFRONTEND_TWBS_VERSION_DEFAULT)return format_html('',static=_static_url, v=version, min=_min)", "docstring": "Returns Twitter Bootstrap Theme CSS file.", "id": "f94:m14"} {"signature": "@register.simple_tagdef djfrontend_jquery(version=None):", "body": "if version is None:version = getattr(settings, '', DJFRONTEND_JQUERY_DEFAULT)if getattr(settings, '', False):template = ''else:template = ('''')return format_html(template, static=_static_url, v=version)", "docstring": "Returns jQuery JavaScript file according to version number.\nTEMPLATE_DEBUG returns full file, otherwise returns minified file from Google CDN with local fallback.\nIncluded in HTML5 Boilerplate.", "id": "f94:m5"} {"signature": "@register.simple_tagdef djfrontend_twbs_js(version=None, files=None):", "body": "if version is None:if not getattr(settings, '', False):version = getattr(settings, '', DJFRONTEND_TWBS_VERSION_DEFAULT)else:version = getattr(settings, '', DJFRONTEND_TWBS_VERSION_DEFAULT)if files:if files != '':files = files.split('')elif getattr(settings, '', False) and settings.DJFRONTEND_TWBS_JS_FILES != '':files = settings.DJFRONTEND_TWBS_JS_FILES.split('')else:files = ''if files == '':return format_html('''',v=version, static=_static_url)else:if '' in files and '' not in files:files.append('')for file in files:file = ['' %(_static_url, version, file) for file in files]return mark_safe(''.join(file))", "docstring": "Returns Twitter Bootstrap JavaScript file(s).\nall returns concatenated file; full file for TEMPLATE_DEBUG, minified otherwise.\n\nOther choice are:\n affix,\n alert,\n button,\n carousel,\n collapse,\n dropdown,\n modal,\n popover (adds tooltip if not included),\n scrollspy,\n tab,\n tooltip,\n transition.\n\nIndividual files are not minified.", "id": "f94:m15"} {"signature": "def search(self, **kwargs):", "body": "params = {}available_params = [\"\", \"\", \"\", \"\",\"\", \"\", \"\", \"\", \"\",\"\", \"\",\"\", \"\", \"\"]for key in available_params:if key in kwargs:params[key] = kwargs[key]results = self.api.get(\"\", params)return results", "docstring": ":param entity_id: location id\n:param entity_type: location type (city, subzone, zone, lanmark, metro , group)\n:param q: search keyword\n:param start: fetch results after offset\n:param count: max number of results to display\n:param lat: latitude\n:param lon: longitude\n:param radius: radius around (lat,lon); to define search area, defined in meters(M)\n:param cuisines: list of cuisine id's separated by comma\n:param establishment_type: estblishment id obtained from establishments call\n:param collection_id: collection id obtained from collections call\n:param category: category ids obtained from categories call\n:param sort: sort restaurants by (cost, rating, real_distance)\n:param order: used with 'sort' parameter to define ascending / descending\n:return: json response\nThe location input can be specified using Zomato location ID or coordinates. 
Cuisine / Establishment /\nCollection IDs can be obtained from respective api calls.\n\nPartner Access is required to access photos and reviews.\n\nExamples:\n- To search for 'Italian' restaurants in 'Manhattan, New York City',\nset cuisines = 55, entity_id = 94741 and entity_type = zone\n- To search for 'cafes' in 'Manhattan, New York City',\nset establishment_type = 1, entity_type = zone and entity_id = 94741\n- Get list of all restaurants in 'Trending this Week' collection in 'New York City' by using\nentity_id = 280, entity_type = city and collection_id = 1", "id": "f103:c0:m12"} {"signature": "def getRestaurantDetails(self, restaurant_id):", "body": "params = {\"\": restaurant_id}restaurant_details = self.api.get(\"\", params)return restaurant_details", "docstring": ":param restaurant_id: id of restaurant whose details are requested\n:return: json response\nGet detailed restaurant information using Zomato restaurant ID.\nPartner Access is required to access photos and reviews.", "id": "f103:c0:m10"} {"signature": "def getEstablishments(self, city_id, **kwargs):", "body": "params = {\"\": city_id}optional_params = [\"\", \"\"]for key in optional_params:if key in kwargs:params[key] = kwargs[key]establishments = self.api.get(\"\", params)return establishments", "docstring": ":param city_id: id of the city for which collections are needed\n:param lat: latitude\n:param lon: longitude\nGet a list of restaurant types in a city. The location/City input can be provided in the following ways\n- Using Zomato City ID\n- Using coordinates of any location within a city\nList of all restaurants categorized under a particular restaurant type can obtained using\n/Search API with Establishment ID and location details as inputs", "id": "f103:c0:m5"} {"signature": "def parse(self):", "body": "nevents_wrong = feed_json = json.loads(self.feed)if '' not in feed_json['']:returnself.cells = feed_json['']['']self.ncell = event_fields = self.__get_event_fields()while self.ncell < len(self.cells):event = self.__get_next_event(event_fields)if event[''] is None or event[''] is None:logger.warning(\"\", event)nevents_wrong += continueyield eventlogger.info(\"\", nevents_wrong)", "docstring": "Parse the MozillaClub spreadsheet feed cells json.", "id": "f116:c2:m1"} {"signature": "def __get_event_fields(self):", "body": "event_fields = {}while self.ncell < len(self.cells):cell = self.cells[self.ncell]row = cell['']['']if int(row) > :breakncol = int(cell[''][''])name = cell['']['']event_fields[ncol] = nameif ncol in EVENT_TEMPLATE:if event_fields[ncol] != EVENT_TEMPLATE[ncol]:logger.warning(\"\",name, EVENT_TEMPLATE[ncol])else:logger.warning(\"\", name)self.ncell += return event_fields", "docstring": "Get the events fields (columns) from the cells received.", "id": "f116:c2:m2"} {"signature": "@classmethoddef has_resuming(cls):", "body": "return False", "docstring": "Returns whether it supports to resume the fetch process.\n\n :returns: this backend supports items resuming", "id": "f116:c0:m4"} {"signature": "def get_items(self, category=CATEGORY_EVENT, offset=REMO_DEFAULT_OFFSET):", "body": "more = True next_uri = None page = ReMoClient.FIRST_PAGEpage += int(offset / ReMoClient.ITEMS_PER_PAGE)if category == CATEGORY_EVENT:api = self.api_events_urlelif category == CATEGORY_ACTIVITY:api = self.api_activities_urlelif category == CATEGORY_USER:api = self.api_users_urlelse:raise ValueError(category + '')while more:params = {\"\": page,\"\": \"\"}logger.debug(\"\",api, str(params))raw_items = self.fetch(api, 
payload=params)yield raw_itemsitems_data = json.loads(raw_items)next_uri = items_data['']if not next_uri:more = Falseelse:parsed_uri = urllib.parse.urlparse(next_uri)parsed_params = urllib.parse.parse_qs(parsed_uri.query)page = parsed_params[''][]", "docstring": "Retrieve all items for category using pagination", "id": "f117:c1:m1"} {"signature": "def metadata(self, item, filter_classified=False):", "body": "item = super().metadata(item, filter_classified=filter_classified)item[''] = item[''].pop('')return item", "docstring": "ReMo metadata.\n\n This method takes items overrides `metadata` method to add extra\n information related to Remo (offset of the item).\n\n :param item: an item fetched by a backend\n :param filter_classified: sets if classified fields were filtered", "id": "f117:c0:m3"} {"signature": "@classmethoddef has_archiving(cls):", "body": "return True", "docstring": "Returns whether it supports archiving items on the fetch process.\n\n :returns: this backend supports items archive", "id": "f117:c0:m4"} {"signature": "def fetch_items(self, category, **kwargs):", "body": "offset = kwargs['']logger.info(\"\",self.url, category, offset)nitems = titems = page = int(offset / ReMoClient.ITEMS_PER_PAGE)page_offset = page * ReMoClient.ITEMS_PER_PAGEdrop_items = offset - page_offsetlogger.debug(\"\",drop_items, offset, page, page_offset)current_offset = offsetfor raw_items in self.client.get_items(category, offset):items_data = json.loads(raw_items)titems = items_data['']logger.info(\"\",titems - current_offset, current_offset)items = items_data['']for item in items:if drop_items > :drop_items -= continueraw_item_details = self.client.fetch(item[''])item_details = json.loads(raw_item_details)item_details[''] = current_offsetcurrent_offset += yield item_detailsnitems += logger.info(\"\", nitems, titems, offset)", "docstring": "Fetch items\n\n :param category: the category of items to fetch\n :param kwargs: backend arguments\n\n :returns: a generator of items", "id": "f117:c0:m2"} {"signature": "@classmethoddef setup_cmd_parser(cls):", "body": "parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,offset=True,archive=True)parser.parser.add_argument('', nargs='',default=\"\",help=\"\")return parser", "docstring": "Returns the ReMo argument parser.", "id": "f117:c2:m0"} {"signature": "def crates(self, from_page=):", "body": "path = urijoin(CRATES_API_URL, CATEGORY_CRATES)raw_crates = self.__fetch_items(path, from_page)return raw_crates", "docstring": "Get crates in alphabetical order", "id": "f118:c1:m2"} {"signature": "@classmethoddef has_resuming(cls):", "body": "return False", "docstring": "Returns whether it supports to resume the fetch process.\n\n :returns: this backend supports items resuming", "id": "f118:c0:m4"} {"signature": "def fetch(self, url, payload=None):", "body": "response = super().fetch(url, payload=payload)return response.text", "docstring": "Return the textual content associated to the Response object", "id": "f118:c1:m6"} {"signature": "def __fetch_items(self, path, page=):", "body": "fetch_data = Trueparsed_crates = total_crates = while fetch_data:logger.debug(\"\", page)try:payload = {'': '', '': page}raw_content = self.fetch(path, payload=payload)content = json.loads(raw_content)parsed_crates += len(content[''])if not total_crates:total_crates = content['']['']except requests.exceptions.HTTPError as e:logger.error(\"\", e.response.text)raise eyield raw_contentpage += if parsed_crates >= total_crates:fetch_data = False", "docstring": "Return the items from 
Crates.io API using pagination", "id": "f118:c1:m5"} {"signature": "def _init_client(self, from_archive=False):", "body": "return CratesClient(self.sleep_time, self.archive, from_archive)", "docstring": "Init client", "id": "f118:c0:m8"} {"signature": "@staticmethoddef metadata_category(item):", "body": "if '' in item:return CATEGORY_SUMMARYelse:return CATEGORY_CRATES", "docstring": "Extracts the category from an item.\n\n This backend generates two types of item: 'summary' and 'crate'.", "id": "f118:c0:m7"} {"signature": "@classmethoddef setup_cmd_parser(cls):", "body": "parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,from_date=True,archive=True,token_auth=True)group = parser.parser.add_argument_group('')group.add_argument('', dest='',default=SLEEP_TIME, type=int,help=\"\")return parser", "docstring": "Returns the Crates argument parser.", "id": "f118:c2:m0"} {"signature": "def fetch(self, category=CATEGORY_CRATES, from_date=DEFAULT_DATETIME):", "body": "if not from_date:from_date = DEFAULT_DATETIMEfrom_date = datetime_to_utc(from_date)kwargs = {\"\": from_date}items = super().fetch(category, **kwargs)return items", "docstring": "Fetch package data.\n\n The method retrieves packages and summary from Crates.io.\n\n :param category: the category of items to fetch\n :param from_date: obtain packages updated since this date\n\n :returns: a summary and crate items", "id": "f118:c0:m1"} {"signature": "def summary(self):", "body": "path = urijoin(CRATES_API_URL, CATEGORY_SUMMARY)raw_content = self.fetch(path)return raw_content", "docstring": "Get Crates.io summary", "id": "f118:c1:m1"} {"signature": "def get_question_answers(self, question_id):", "body": "page = KitsuneClient.FIRST_PAGEwhile True:api_answers_url = urijoin(self.base_url, '') + ''params = {\"\": page,\"\": question_id,\"\": \"\"}answers_raw = self.fetch(api_answers_url, params)yield answers_rawanswers = json.loads(answers_raw)if not answers['']:breakpage += ", "docstring": "Retrieve all answers for a question from older to newer (updated)", "id": "f119:c1:m2"} {"signature": "def get_questions(self, offset=None):", "body": "page = KitsuneClient.FIRST_PAGEif offset:page += int(offset / KitsuneClient.ITEMS_PER_PAGE)while True:api_questions_url = urijoin(self.base_url, '') + ''params = {\"\": page,\"\": \"\"}questions = self.fetch(api_questions_url, params)yield questionsquestions_json = json.loads(questions)next_uri = questions_json['']if not next_uri:breakpage += ", "docstring": "Retrieve questions from older to newer updated starting offset", "id": "f119:c1:m1"} {"signature": "def _init_client(self, from_archive=False):", "body": "return KitsuneClient(self.url, self.archive, from_archive)", "docstring": "Init client", "id": "f119:c0:m9"} {"signature": "def metadata(self, item, filter_classified=False):", "body": "item = super().metadata(item, filter_classified=filter_classified)item[''] = item[''].pop('')return item", "docstring": "Kitsune metadata.\n\n This method takes items overrides `metadata` method to add extra\n information related to Kitsune (offset of the question).\n\n :param item: an item fetched by a backend\n :param filter_classified: sets if classified fields were filtered", "id": "f119:c0:m3"} {"signature": "def get_token_from_post_data(self, data):", "body": "try:for x in ['', '', '']:if not data.get(x):raise TypeError(\"\".format(x))if '' in data:return self.refresh_token(**data)for x in ['', '']:if not data.get(x):raise TypeError(\"\".format(x)) return self.get_token(**data)except TypeError as 
exc:self._handle_exception(exc)return self._make_json_error_response('')except StandardError as exc:self._handle_exception(exc)return self._make_json_error_response('')", "docstring": "Get a token response from POST data.\n\n :param data: POST data containing authorization information.\n :type data: dict\n :rtype: requests.Response", "id": "f126:c1:m10"} {"signature": "def get_authorization_code_from_uri(self, uri):", "body": "params = utils.url_query_params(uri)try:if '' not in params:raise TypeError('')if '' not in params:raise TypeError('')if '' not in params:raise TypeError('')return self.get_authorization_code(**params)except TypeError as exc:self._handle_exception(exc)err = ''if '' in params:u = params['']return self._make_redirect_error_response(u, err)else:return self._invalid_redirect_uri_response()except StandardError as exc:self._handle_exception(exc)err = ''u = params['']return self._make_redirect_error_response(u, err)", "docstring": "Get authorization code response from a URI. This method will\n ignore the domain and path of the request, instead\n automatically parsing the query string parameters.\n\n :param uri: URI to parse for authorization information.\n :type uri: str\n :rtype: requests.Response", "id": "f126:c1:m9"} {"signature": "def _invalid_redirect_uri_response(self):", "body": "return self._make_json_error_response('')", "docstring": "What to return when the redirect_uri parameter is missing.\n\n :rtype: requests.Response", "id": "f126:c0:m5"} {"signature": "def get_token(self,grant_type,client_id,client_secret,redirect_uri,code,**params):", "body": "if grant_type != '':return self._make_json_error_response('')is_valid_client_id = self.validate_client_id(client_id)is_valid_client_secret = self.validate_client_secret(client_id,client_secret)is_valid_redirect_uri = self.validate_redirect_uri(client_id,redirect_uri)scope = params.get('', '')is_valid_scope = self.validate_scope(client_id, scope)data = self.from_authorization_code(client_id, code, scope)is_valid_grant = data is not Noneif not (is_valid_client_id and is_valid_client_secret):return self._make_json_error_response('')if not is_valid_grant or not is_valid_redirect_uri:return self._make_json_error_response('')if not is_valid_scope:return self._make_json_error_response('')self.discard_authorization_code(client_id, code)access_token = self.generate_access_token()token_type = self.token_typeexpires_in = self.token_expires_inrefresh_token = self.generate_refresh_token()self.persist_token_information(client_id=client_id,scope=scope,access_token=access_token,token_type=token_type,expires_in=expires_in,refresh_token=refresh_token,data=data)return self._make_json_response({'': access_token,'': token_type,'': expires_in,'': refresh_token})", "docstring": "Generate access token HTTP response.\n\n :param grant_type: Desired grant type. 
Must be \"authorization_code\".\n :type grant_type: str\n :param client_id: Client ID.\n :type client_id: str\n :param client_secret: Client secret.\n :type client_secret: str\n :param redirect_uri: Client redirect URI.\n :type redirect_uri: str\n :param code: Authorization code.\n :type code: str\n :rtype: requests.Response", "id": "f126:c1:m8"} {"signature": "@propertydef token_length(self):", "body": "return ", "docstring": "Property method to get the length used to generate tokens.\n\n :rtype: int", "id": "f126:c1:m0"} {"signature": "def _handle_exception(self, exc):", "body": "logger = logging.getLogger(__name__)logger.exception(exc)", "docstring": "Handle an internal exception that was caught and suppressed.\n\n :param exc: Exception to process.\n :type exc: Exception", "id": "f126:c0:m0"} {"signature": "def get_token(self, code, **params):", "body": "params[''] = codeif '' not in params:params[''] = self.default_grant_typeparams.update({'': self.client_id,'': self.client_secret,'': self.redirect_uri})response = self.http_post(self.token_uri, params)try:return response.json()except TypeError:return response.json", "docstring": "Get an access token from the provider token URI.\n\n :param code: Authorization code.\n :type code: str\n :return: Dict containing access token, refresh token, etc.\n :rtype: dict", "id": "f127:c0:m5"} {"signature": "def url_query_params(url):", "body": "return dict(urlparse.parse_qsl(urlparse.urlparse(url).query, True))", "docstring": "Return query parameters as a dict from the specified URL.\n\n :param url: URL.\n :type url: str\n :rtype: dict", "id": "f128:m1"} {"signature": "def setUp(self):", "body": "self.places = ctllib.Places(config='', messages='')def _cleanup():for d in self.places:if os.path.exists(d):shutil.rmtree(d)_cleanup()self.addCleanup(_cleanup)for d in self.places:os.mkdir(d)", "docstring": "Set up configuration and build/cleanup directories", "id": "f133:c1:m0"} {"signature": "def setUp(self):", "body": "self.parser = ctllib.PARSERself.base = ['', '', '', '']", "docstring": "Initialize the parser, required arguments", "id": "f133:c0:m0"} {"signature": "def jsonFrom(fname):", "body": "with io.open(fname, \"\", encoding='') as fp:return json.loads(fp.read())", "docstring": "Load JSON from a file", "id": "f133:m0"} {"signature": "def remove(self, name):", "body": "self.events.append(('', name))", "docstring": "Get a remove event", "id": "f136:c0:m2"} {"signature": "def setUp(self):", "body": "DirectoryBasedTest.setUp(self)self.receiver = EventRecorder()self.monitor = directory_monitor.checker(self.testDirectory,self.receiver)self.assertFalse(self.receiver.events)", "docstring": "Set up the test", "id": "f136:c6:m0"} {"signature": "def setUp(self):", "body": "def _cleanup(testDir):if os.path.exists(testDir):shutil.rmtree(testDir)self.testDirs = {}for subd in ['', '']:testDir = self.testDirs[subd] = os.path.join(os.getcwd(), subd)self.addCleanup(_cleanup, testDir)_cleanup(testDir)os.makedirs(testDir)self.my_reactor = test_procmon.DummyProcessReactor()self.service = service.get(self.testDirs[''],self.testDirs[''],, reactor=self.my_reactor)self._finishSetUp()", "docstring": "Set up the test", "id": "f138:c2:m0"} {"signature": "def setContent(self, content):", "body": "self.content = content", "docstring": "Set file contents", "id": "f138:c0:m2"} {"signature": "def request(self, method, url, headers, body):", "body": "d = defer.Deferred()self.calls.append((method, url, headers, body))self.pending[url].append(d)return d", "docstring": "Pretend to 
make a request", "id": "f142:c0:m1"} {"signature": "def getArgs(self):", "body": "return ''.join(''.join('' % (key, vpart)for vpart in value.split())for key, value in six.iteritems(self.args)).split()", "docstring": "Get the arguments as a list of strings", "id": "f144:c2:m1"} {"signature": "def checker(location, receiver):", "body": "path = filepath.FilePath(location)files = set()filesContents = {}def _check(path):currentFiles = set(fname for fname in os.listdir(location)if not fname.endswith(''))removed = files - currentFilesadded = currentFiles - filesfor fname in added:contents = path.child(fname).getContent()filesContents[fname] = contentsreceiver.add(fname, contents)for fname in removed:receiver.remove(fname)same = currentFiles & filesfor fname in same:newContents = path.child(fname).getContent()oldContents = filesContents[fname]if newContents == oldContents:continuereceiver.remove(fname)filesContents[fname] = newContentsreceiver.add(fname, newContents)files.clear()files.update(currentFiles)return functools.partial(_check, path)", "docstring": "Construct a function that checks a directory for process configuration\n\n The function checks for additions or removals\n of JSON process configuration files and calls the appropriate receiver\n methods.\n\n :param location: string, the directory to monitor\n :param receiver: IEventReceiver\n :returns: a function with no parameters", "id": "f145:m0"} {"signature": "def messages(location, receiver):", "body": "path = filepath.FilePath(location)def _check(path):messageFiles = path.globChildren('')for message in messageFiles:if message.basename().endswith(''):continuereceiver.message(message.getContent())message.remove()return functools.partial(_check, path)", "docstring": "Construct a function that checks a directory for messages\n\n The function checks for new messages and\n calls the appropriate method on the receiver. 
Sent messages are\n deleted.\n\n :param location: string, the directory to monitor\n :param receiver: IEventReceiver\n :returns: a function with no parameters", "id": "f145:m1"} {"signature": "def check(self):", "body": "if self.closed:raise ValueError(\"\")self._maybeReset()if self.url is None:return Falsereturn self._maybeCheck()", "docstring": "Check the state of HTTP", "id": "f146:c1:m4"} {"signature": "def markBad(self, dummyValue):", "body": "self.bad += ", "docstring": "Note an unsuccessful check", "id": "f146:c0:m2"} {"signature": "def markGood(self, dummyValue):", "body": "self.bad = ", "docstring": "Note a successful check", "id": "f146:c0:m3"} {"signature": "def makeService(opt):", "body": "restarter, path = beatcheck.parseConfig(opt)pool = client.HTTPConnectionPool(reactor)agent = client.Agent(reactor=reactor, pool=pool)settings = Settings(reactor=reactor, agent=agent)states = {}checker = functools.partial(check, settings, states, path)httpcheck = tainternet.TimerService(opt[''], run, restarter, checker)httpcheck.setName('')return heart.wrapHeart(httpcheck)", "docstring": "Make a service\n\n :params opt: dictionary-like object with 'freq', 'config' and 'messages'\n :returns: twisted.application.internet.TimerService that at opt['freq']\n checks for stale processes in opt['config'], and sends\n restart messages through opt['messages']", "id": "f146:m2"} {"signature": "def run(restarter, checker):", "body": "for bad in checker():restarter(bad)", "docstring": "Run restarter on the checker's output\n\n :params restarter: something to run on the output of the checker\n :params checker: a function expected to get one argument (current time)\n and return a list of stale names\n :params timer: a function of zero arguments, intended to return current\n time\n :returns: None", "id": "f146:m1"} {"signature": "def runProcess(args, timeout, grace, reactor):", "body": "deferred = defer.Deferred()protocol = ProcessProtocol(deferred)process = reactor.spawnProcess(protocol, args[], args, env=os.environ)def _logEnded(err):err.trap(tierror.ProcessDone, tierror.ProcessTerminated)print(err.value)deferred.addErrback(_logEnded)def _cancelTermination(dummy):for termination in terminations:if termination.active():termination.cancel()deferred.addCallback(_cancelTermination)terminations = []terminations.append(reactor.callLater(timeout, process.signalProcess,\"\"))terminations.append(reactor.callLater(timeout+grace,process.signalProcess, \"\"))return deferred", "docstring": "Run a process, return a deferred that fires when it is done\n\n :params args: Process arguments\n :params timeout: Time before terminating process\n :params grace: Time before killing process after terminating it\n :params reactor: IReactorProcess and IReactorTime\n :returns: deferred that fires with success when the process ends,\n or fails if there was a problem spawning/terminating\n the process", "id": "f148:m0"} {"signature": "def processExited(self, reason):", "body": "pass", "docstring": "Ignore processExited", "id": "f148:c0:m3"} {"signature": "def childConnectionLost(self, reason):", "body": "pass", "docstring": "Ignore childConnectionLoss", "id": "f148:c0:m4"} {"signature": "def replaceEnvironment(case, myEnv=None):", "body": "if myEnv is None:myEnv = buildEnv()oldEnviron = os.environdef _cleanup():os.environ = oldEnvironcase.addCleanup(_cleanup)os.environ = myEnv", "docstring": "Replace environment temporarily, restoring it at end of test\n\n :params myEnv: a dict-like object", "id": "f149:m0"} {"signature": "def 
maybeAddHeart(master):", "body": "heartSer = makeService()if heartSer is None:returnheartSer.setName('')heartSer.setServiceParent(master)", "docstring": "Add a heart to a service collection\n\n Add a heart to a service.IServiceCollector if\n the heart is not None.\n\n :params master: a service.IServiceCollector", "id": "f151:m1"} {"signature": "def message(self, contents):", "body": "contents = json.loads(contents.decode(''))tp = contents['']if tp == '':self.monitor.stopProcess(contents[''])log.msg(\"\", contents[''])elif tp == '':self.monitor.restartAll()log.msg(\"\")elif tp == '':log.msg(\"\", contents[''])for name in self._groupToProcess[contents['']]:log.msg(\"\", name)self.monitor.stopProcess(name)else:raise ValueError('', contents)", "docstring": "Respond to a restart or a restart-all message\n\n :params contents: string, contents of message\n parsed as JSON, and assumed to have a 'type'\n key, with value either 'restart' or 'restart-all'.\n If the value is 'restart', another key\n ('value') should exist with a logical process\n name.", "id": "f157:c0:m3"} {"signature": "def remove(self, name):", "body": "self.monitor.removeProcess(name)log.msg(\"\", name)for group in self._processToGroups.pop(name):self._groupToProcess[group].remove(name)", "docstring": "Remove a process\n\n :params name: string, name of process", "id": "f157:c0:m2"} {"signature": "@mainlib.COMMANDS.register(name='')def main(argv):", "body": "ns = PARSER.parse_args(argv[:])call(ns)", "docstring": "command-line entry point\n\n --messages: messages directory\n\n --config: configuration directory\n\n subcommands:\n add:\n name (positional)\n\n --cmd (required) -- executable\n\n --arg -- add an argument\n\n --env -- add an environment variable (VAR=value)\n\n --uid -- set uid\n\n --gid -- set gid\n\n --extras -- a JSON file with more parameters\n\n --env-inherit -- add an environment variable to inherit\n\n remove:\n name (positional)\n restart:\n name (positional)\n restart-all:\n no arguments", "id": "f158:m8"} {"signature": "def restart(places, name):", "body": "content = _dumps(dict(type='', name=name))_addMessage(places, content)", "docstring": "Restart a process\n\n :params places: a Places instance\n :params name: string, the logical name of the process\n :returns: None", "id": "f158:m4"} {"signature": "def makeService(opt):", "body": "restarter, path = parseConfig(opt)now = time.time()checker = functools.partial(check, path, now)beatcheck = tainternet.TimerService(opt[''], run, restarter,checker, time.time)beatcheck.setName('')return heart.wrapHeart(beatcheck)", "docstring": "Make a service\n\n :params opt: dictionary-like object with 'freq', 'config' and 'messages'\n :returns: twisted.application.internet.TimerService that at opt['freq']\n checks for stale processes in opt['config'], and sends\n restart messages through opt['messages']", "id": "f159:m4"} {"signature": "def run(restarter, checker, timer):", "body": "for bad in checker(timer()):restarter(bad)", "docstring": "Run restarter on the checker's output\n\n :params restarter: something to run on the output of the checker\n :params checker: a function expected to get one argument (current time)\n and return a list of stale names\n :params timer: a function of zero arguments, intended to return current\n time\n :returns: None", "id": "f159:m2"} {"signature": "def parseConfig(opt):", "body": "places = ctllib.Places(config=opt[''], messages=opt[''])restarter = functools.partial(ctllib.restart, places)path = filepath.FilePath(opt[''])return restarter, path", 
"docstring": "Parse configuration\n\n :params opt: dict-like object with config and messages keys\n :returns: restarter, path", "id": "f159:m3"} {"signature": "def hash_eth2(data: Union[bytes, bytearray]) -> Hash32:", "body": "return Hash32(keccak(data))", "docstring": "Return Keccak-256 hashed result.\nNote: it's a placeholder and we aim to migrate to a S[T/N]ARK-friendly hash function in\na future Ethereum 2.0 deployment phase.", "id": "f178:m0"} {"signature": "def create_access_request(pid_value, users, confirmed):", "body": "datastore = current_app.extensions[''].datastorereceiver = datastore.get_user(users[''][''])sender = datastore.get_user(users[''][''])return AccessRequest.create(recid=pid_value,receiver=receiver,sender_full_name=\"\",sender_email=\"\",sender=sender if confirmed else None,justification=\"\",)", "docstring": "Access Request.", "id": "f200:m0"} {"signature": "@propertydef engine(self):", "body": "if not hasattr(self, ''):from cryptography.fernet import Fernetfrom cryptography.hazmat.backends import default_backendfrom cryptography.hazmat.primitives import hashesdigest = hashes.Hash(hashes.SHA256(), backend=default_backend())digest.update(current_app.config[''].encode(''))fernet_key = urlsafe_b64encode(digest.finalize())self._engine = Fernet(fernet_key)return self._engine", "docstring": "Get cryptographic engine.", "id": "f208:c1:m0"} {"signature": "def __init__(self, expires_at=None, **kwargs):", "body": "assert isinstance(expires_at, datetime) or expires_at is Nonedt = expires_at - datetime.now() if expires_at else Nonesuper(TimedSecretLinkSerializer, self).__init__(current_app.config[''],expires_in=int(dt.total_seconds()) if dt else None,salt='',**kwargs)", "docstring": "Initialize underlying TimedJSONWebSignatureSerializer.", "id": "f208:c4:m0"} {"signature": "def validate_reject(form, field):", "body": "if field.data and form.accept.data:raise validators.ValidationError(_(\"\"))", "docstring": "Validate that accept have not been set.", "id": "f211:c1:m1"} {"signature": "def reverse(self, col):", "body": "if col in self.options:if self.is_selected(col):return col if not self.asc else ''.format(col)else:return colreturn None", "docstring": "Get reverse direction of ordering.", "id": "f213:c0:m1"} {"signature": "def __init__(self, options, selected):", "body": "self.options = optionsif selected in options:self._selected = selectedself.asc = Trueelif selected and selected[] == '' and selected[:] in options:self._selected = selected[:]self.asc = Falseelse:self._selected = Noneself.asc = None", "docstring": "Initialize ordering with possible options the selected option.\n\n :param options: List of column options.\n :param selected: Selected column. 
Prefix name with ``-`` to denote\n descending ordering.", "id": "f213:c0:m0"} {"signature": "def dir(self, col, asc='', desc=''):", "body": "if col == self._selected and self.asc is not None:return asc if self.asc else descelse:return None", "docstring": "Get direction (ascending/descending) of ordering.", "id": "f213:c0:m2"} {"signature": "def send_confirmed_notifications(request):", "body": "pid, record = get_record(request.recid)if record is None:current_app.logger.error(\"\"% request.recid)returntitle = _(\"\", record=record[\"\"])_send_notification(request.receiver.email,title,\"\",request=request,record=record,pid=pid,)_send_notification(request.sender_email,title,\"\",request=request,record=record,pid=pid,)", "docstring": "Receiver for request-confirmed signal to send email notification.", "id": "f214:m3"} {"signature": "def send_accept_notification(request, message=None, expires_at=None):", "body": "pid, record = get_record(request.recid)_send_notification(request.sender_email,_(\"\"),\"\",request=request,record=record,pid=pid,record_link=request.link.get_absolute_url(''),message=message,expires_at=expires_at,)", "docstring": "Receiver for request-accepted signal to send email notification.", "id": "f214:m2"} {"signature": "def create_secret_link(request, message=None, expires_at=None):", "body": "pid, record = get_record(request.recid)if not record:raise RecordNotFound(request.recid)description = render_template(\"\",request=request,record=record,pid=pid,expires_at=expires_at,message=message,)request.create_secret_link(record[\"\"],description=description,expires_at=expires_at)", "docstring": "Receiver for request-accepted signal.", "id": "f214:m1"} {"signature": "def _send_notification(to, subject, template, **ctx):", "body": "msg = Message(subject,sender=current_app.config.get(''),recipients=[to])msg.body = render_template(template, **ctx)send_email.delay(msg.__dict__)", "docstring": "Render a template and send as email.", "id": "f214:m6"} {"signature": "def __init__(self, icon=None):", "body": "self._icon = icon", "docstring": "Initialize button.", "id": "f215:c0:m0"} {"signature": "@classmethoddef query_by_owner(cls, user):", "body": "return cls.query.filter_by(owner_user_id=user.id)", "docstring": "Get secret links by user.", "id": "f217:c1:m2"} {"signature": "def is_valid(self):", "body": "return not(self.is_expired() or self.is_revoked())", "docstring": "Determine if link is still valid.", "id": "f217:c1:m8"} {"signature": "def secret_key():", "body": "return current_app.config[''].encode('')", "docstring": "Return secret key as bytearray.", "id": "f217:m0"} {"signature": "def confirm_email(self):", "body": "with db.session.begin_nested():if self.status != RequestStatus.EMAIL_VALIDATION:raise InvalidRequestStateError(RequestStatus.EMAIL_VALIDATION)self.status = RequestStatus.PENDINGrequest_confirmed.send(self)", "docstring": "Confirm that senders email is valid.", "id": "f217:c2:m3"} {"signature": "def confirm(pid, record, template, **kwargs):", "body": "recid = int(pid.pid_value)token = request.view_args['']data = EmailConfirmationSerializer.compat_validate_token(token)if data is None:flash(_(\"\"), category='')return redirect(url_for(\"\", pid_value=recid))r = AccessRequest.query.get(data[''])if not r:abort()if r.status != RequestStatus.EMAIL_VALIDATION:abort()r.confirm_email()db.session.commit()flash(_(\"\"), category='')return redirect(url_for(\"\", pid_value=recid))", "docstring": "Confirm email address.", "id": "f218:m4"} {"signature": 
"@blueprint.app_template_filter()@evalcontextfilterdef nl2br(eval_ctx, value):", "body": "result = u''.join(u'' % p.replace('', '')for p in _paragraph_re.split(escape(value)))if eval_ctx.autoescape:result = Markup(result)return result", "docstring": "Template filter to convert newlines to
-tags.", "id": "f220:m0"} {"signature": "def propagate_astrometry(self, phi, theta, parallax, muphistar, mutheta, vrad, t0, t1):", "body": "t = t1-t0p0, q0, r0 = normalTriad(phi, theta)pmra0 = muphistar*self.mastoradpmdec0 = mutheta*self.mastoradplx0 = parallax*self.mastoradpmr0 = vrad*parallax/auKmYearPerSec*self.mastoradpmtot0sqr = (muphistar** + mutheta**) * self.mastorad**pmvec0 = pmra0*p0+pmdec0*q0f = ( + *pmr0*t + (pmtot0sqr+pmr0**)*t**)**(-)u = (r0*(+pmr0*t) + pmvec0*t)*f_, phi1, theta1 = cartesianToSpherical(u[], u[], u[])parallax1 = parallax*fpmr1 = (pmr0+(pmtot0sqr + pmr0**)*t)*f**pmvec1 = (pmvec0*(+pmr0*t) - r0*pmr0***t)*f**p1, q1, r1 = normalTriad(phi1, theta1)muphistar1 = sum(p1*pmvec1/self.mastorad, axis=)mutheta1 = sum(q1*pmvec1/self.mastorad, axis =)murad1 = pmr1/self.mastoradreturn phi1, theta1, parallax1, muphistar1, mutheta1, murad1", "docstring": "Propagate the position of a source from the reference epoch t0 to the new epoch t1.\n\nParameters\n----------\n\nphi : float\n Longitude at reference epoch (radians).\ntheta : float\n Latitude at reference epoch (radians).\nparallax : float\n Parallax at the reference epoch (mas).\nmuphistar : float\n Proper motion in longitude (including cos(latitude) term) at reference epoch (mas/yr).\nmutheta : float\n Proper motion in latitude at reference epoch (mas/yr).\nvrad : float\n Radial velocity at reference epoch (km/s).\nt0 : float\n Reference epoch (Julian years).\nt1 : float\n New epoch (Julian years).\n\nReturns\n-------\n\nAstrometric parameters, including the \"radial proper motion\" (NOT the radial velocity), at the new epoch.\nphi1, theta1, parallax1, muphistar1, mutheta1, mur1 = epoch_prop_pos(..., t0, t1)", "id": "f224:c1:m1"} {"signature": "def propagate_astrometry_and_covariance_matrix(self, a0, c0, t0, t1):", "body": "zero, one, two, three = , , , tau = t1-t0p0, q0, r0 = normalTriad(a0[], a0[])par0 = a0[]*self.mastoradpma0 = a0[]*self.mastoradpmd0 = a0[]*self.mastoradpmr0 = a0[]*a0[]/auKmYearPerSec*self.mastoradpmvec0 = pma0*p0+pmd0*q0tau2 = tau*taupm02 = pma0** + pmd0**w = one + pmr0*tauf2 = one/(one + two*pmr0*tau + (pm02+pmr0**)*tau2)f = sqrt(f2)f3 = f2*ff4 = f2*f2u = (r0*w + pmvec0*tau)*f_, ra, dec = cartesianToSpherical(u[], u[], u[])par = par0*fpmvec = (pmvec0*(one+pmr0*tau) - r0*pmr0***tau)*f3pmr = (pmr0+(pm02 + pmr0**)*tau)*f2p, q, r = normalTriad(ra, dec)pma = sum(p*pmvec, axis=)pmd = sum(q*pmvec, axis =)a = zeros_like(a0)a[] = raa[] = deca[] = par/self.mastorada[] = pma/self.mastorada[] = pmd/self.mastorada[] = pmr/self.mastoradpmz = pmvec0*f - three*pmvec*wpp0 = sum(p*p0, axis=)pq0 = sum(p*q0, axis=)pr0 = sum(p*r0, axis=)qp0 = sum(q*p0, axis=)qq0 = sum(q*q0, axis=)qr0 = sum(q*r0, axis=)ppmz = sum(p*pmz, axis=)qpmz = sum(q*pmz, axis=)J = zeros_like(c0)if (c0.ndim==):J = J[newaxis,:,:]J[:,,] = pp0*w*f - pr0*pma0*tau*fJ[:,,] = pq0*w*f - pr0*pmd0*tau*fJ[:,,] = zeroJ[:,,] = pp0*tau*fJ[:,,] = pq0*tau*fJ[:,,] = -pma*tau2J[:,,] = qp0*w*f - qr0*pma0*tau*fJ[:,,] = qq0*w*f - qr0*pmd0*tau*fJ[:,,] = zeroJ[:,,] = qp0*tau*fJ[:,,] = qq0*tau*fJ[:,,] = -pmd*tau2J[:,,] = zeroJ[:,,] = zeroJ[:,,] = fJ[:,,] = -par*pma0*tau2*f2J[:,,] = -par*pmd0*tau2*f2J[:,,] = -par*w*tau*f2J[:,,] = -pp0*pm02*tau*f3 - pr0*pma0*w*f3J[:,,] = -pq0*pm02*tau*f3 - pr0*pmd0*w*f3J[:,,] = zeroJ[:,,] = pp0*w*f3 - two*pr0*pma0*tau*f3 - three*pma*pma0*tau2*f2J[:,,] = pq0*w*f3 - two*pr0*pmd0*tau*f3 - three*pma*pmd0*tau2*f2J[:,,] = ppmz*tau*f2J[:,,] = -qp0*pm02*tau*f3 - qr0*pma0*w*f3J[:,,] = -qq0*pm02*tau*f3 - qr0*pmd0*w*f3J[:,,] = zeroJ[:,,] = qp0*w*f3 - 
two*qr0*pma0*tau*f3 - three*pmd*pma0*tau2*f2J[:,,] = qq0*w*f3 - two*qr0*pmd0*tau*f3 - three*pmd*pmd0*tau2*f2J[:,,] = qpmz*tau*f2J[:,,] = zeroJ[:,,] = zeroJ[:,,] = zeroJ[:,,] = two*pma0*w*tau*f4J[:,,] = two*pmd0*w*tau*f4J[:,,] = (w** - pm02*tau2)*f4JT = zeros_like(J)for i in range(J.shape[]):JT[i] = J[i].Tif (c0.ndim==):c = matmul(J,matmul(c0[newaxis,:,:],JT))else:c = matmul(J,matmul(c0,JT))return a, squeeze(c)", "docstring": "Propagate the covariance matrix of the astrometric parameters and radial proper motion of a\nsource from epoch t0 to epoch t1.\n\nCode based on the Hipparcos Fortran implementation by Lennart Lindegren.\n\nParameters\n----------\n\na0 : array_like\n 6-element vector: (phi, theta, parallax, muphistar, mutheta, vrad) in units of (radians,\n radians, mas, mas/yr, mas/yr, km/s). Shape of a should be (6,) or (6,N), with N the number of\n sources for which the astrometric parameters are provided.\n\nc0 : array_like\n Covariance matrix stored in a 6x6 element array. This can be constructed from the columns\n listed in the Gaia catalogue. The units are [mas^2, mas^2/yr, mas^2/yr^2] for the various\n elements. Note that the elements in the 6th row and column should be:\n c[6,i]=c[i,6]=c[i,3]*vrad/auKmYearPerSec for i=1,..,5 and\n c[6,6]=c[3,3]*(vrad^2+vrad_error^2)/auKmYearPerSec^2+(parallax*vrad_error/auKmYearPerSec)^2\n\n Shape of c0 should be (6,6) or (N,6,6).\n\nt0 : float\n Reference epoch (Julian years).\n\nt1 : float\n New epoch (Julian years).\n\nReturns\n-------\n\nAstrometric parameters, including the \"radial proper motion\" (NOT the radial velocity), and\ncovariance matrix at the new epoch as a 2D matrix with the new variances on the diagional and the\ncovariance in the off-diagonal elements.", "id": "f224:c1:m3"} {"signature": "def transformCartesianCoordinates(self, x, y, z):", "body": "xrot, yrot, zrot = dot(self.rotationMatrix,[x,y,z])return xrot, yrot, zrot", "docstring": "Rotates Cartesian coordinates from one reference system to another using the rotation matrix with\nwhich the class was initialized. The inputs can be scalars or 1-dimensional numpy arrays.\n\nParameters\n----------\n\nx - Value of X-coordinate in original reference system\ny - Value of Y-coordinate in original reference system\nz - Value of Z-coordinate in original reference system\n\nReturns\n-------\n\nxrot - Value of X-coordinate after rotation\nyrot - Value of Y-coordinate after rotation\nzrot - Value of Z-coordinate after rotation", "id": "f224:c0:m1"} {"signature": "def __init__(self, desiredTransformation):", "body": "self.rotationMatrix=_rotationMatrixMap[desiredTransformation]self.transformationStrings=_transformationStringMap[desiredTransformation]", "docstring": "Class constructor/initializer. Sets the type of transformation to be used.\n\nParameters\n----------\n\ndesiredTransformation - The kind of coordinate transformation that should be provided. 
For example\nTransformations.GAL2ECL", "id": "f224:c0:m0"} {"signature": "def propagate_pos(self, phi, theta, parallax, muphistar, mutheta, vrad, t0, t1):", "body": "phi1, theta1, parallax1, muphistar1, mutheta1, vrad1 = self.epoch_prop_astrometry(phi, theta, parallax, muphistar, mutheta, vrad, t0, t1)return phi1, theta1", "docstring": "Propagate the position of a source from the reference epoch t0 to the new epoch t1.\n\nParameters\n----------\n\nphi : float\n Longitude at reference epoch (radians).\ntheta : float\n Latitude at reference epoch (radians).\nparallax : float\n Parallax at the reference epoch (mas).\nmuphistar : float\n Proper motion in longitude (including cos(latitude) term) at reference epoch (mas/yr).\nmutheta : float\n Proper motion in latitude at reference epoch (mas/yr).\nvrad : float\n Radial velocity at reference epoch (km/s).\nt0 : float\n Reference epoch (Julian years).\nt1 : float\n New epoch (Julian years).\n\nReturns\n-------\n\nCoordinates phi and theta at new epoch (in radians)", "id": "f224:c1:m2"} {"signature": "def transformSkyCoordinateErrors(self, phi, theta, sigPhiStar, sigTheta, rhoPhiTheta=):", "body": "if isscalar(rhoPhiTheta) and not isscalar(sigTheta):rhoPhiTheta=zeros_like(sigTheta)+rhoPhiThetac, s = self._getJacobian(phi,theta)cSqr = c*csSqr = s*scovar = sigPhiStar*sigTheta*rhoPhiThetavarPhiStar = sigPhiStar*sigPhiStarvarTheta = sigTheta*sigThetavarPhiStarRot = cSqr*varPhiStar+sSqr*varTheta+*covar*c*svarThetaRot = sSqr*varPhiStar+cSqr*varTheta-*covar*c*scovarRot = (cSqr-sSqr)*covar+c*s*(varTheta-varPhiStar)return sqrt(varPhiStarRot), sqrt(varThetaRot), covarRot/sqrt(varPhiStarRot*varThetaRot)", "docstring": "Converts the sky coordinate errors from one reference system to another, including the covariance\nterm. Equations (1.5.4) and (1.5.20) from section 1.5 in the Hipparcos Explanatory Volume 1 are used.\n\nParameters\n----------\n\nphi - The longitude-like angle of the position of the source (radians).\ntheta - The latitude-like angle of the position of the source (radians).\nsigPhiStar - Standard error in the longitude-like angle of the position of the source (radians or\n sexagesimal units, including cos(latitude) term)\nsigTheta - Standard error in the latitude-like angle of the position of the source (radians or\n sexagesimal units)\n\nKeywords (optional)\n-------------------\n\nrhoPhiTheta - Correlation coefficient of the position errors. 
Set to zero if this keyword is not\n provided.\n\nRetuns\n------\n\nsigPhiRotStar - The transformed standard error in the longitude-like angle (including\n cos(latitude) factor)\nsigThetaRot - The transformed standard error in the latitude-like angle.\nrhoPhiThetaRot - The transformed correlation coefficient.", "id": "f224:c0:m4"} {"signature": "def phaseSpaceToAstrometry(x, y, z, vx, vy, vz):", "body": "u, phi, theta = cartesianToSpherical(x, y, z)parallax = _auMasParsec/up, q, r = normalTriad(phi, theta)velocitiesArray=array([vx,vy,vz])if isscalar(u):muphistar=dot(p,velocitiesArray)*parallax/_auKmYearPerSecmutheta=dot(q,velocitiesArray)*parallax/_auKmYearPerSecvrad=dot(r,velocitiesArray)else:muphistar=zeros_like(parallax)mutheta=zeros_like(parallax)vrad=zeros_like(parallax)for i in range(parallax.size):muphistar[i]=dot(p[:,i],velocitiesArray[:,i])*parallax[i]/_auKmYearPerSecmutheta[i]=dot(q[:,i],velocitiesArray[:,i])*parallax[i]/_auKmYearPerSecvrad[i]=dot(r[:,i],velocitiesArray[:,i])return phi, theta, parallax, muphistar, mutheta, vrad", "docstring": "From the given phase space coordinates calculate the astrometric observables, including the radial\nvelocity, which here is seen as the sixth astrometric parameter. The phase space coordinates are\nassumed to represent barycentric (i.e. centred on the Sun) positions and velocities.\n\nThis function has no mechanism to deal with units. The velocity units are always assumed to be km/s,\nand the code is set up such that for positions in pc, the return units for the astrometry are radians,\nmilliarcsec, milliarcsec/year and km/s. For positions in kpc the return units are: radians,\nmicroarcsec, microarcsec/year, and km/s.\n\nNOTE that the doppler factor k=1/(1-vrad/c) is NOT used in the calculations. This is not a problem for\nsources moving at typical velocities of Galactic stars.\n\nParameters\n----------\n\nx - The x component of the barycentric position vector (in pc or kpc).\ny - The y component of the barycentric position vector (in pc or kpc).\nz - The z component of the barycentric position vector (in pc or kpc).\nvx - The x component of the barycentric velocity vector (in km/s).\nvy - The y component of the barycentric velocity vector (in km/s).\nvz - The z component of the barycentric velocity vector (in km/s).\n\nReturns\n-------\n\nphi - The longitude-like angle of the position of the source (radians).\ntheta - The latitude-like angle of the position of the source (radians).\nparallax - The parallax of the source (in mas or muas, see above)\nmuphistar - The proper motion in the longitude-like angle, multiplied by cos(theta) (mas/yr or muas/yr,\nsee above)\nmutheta - The proper motion in the latitude-like angle (mas/yr or muas/yr, see above)\nvrad - The radial velocity (km/s)", "id": "f227:m4"} {"signature": "def elementaryRotationMatrix(axis, rotationAngle):", "body": "if (axis==\"\" or axis==\"\"):return array([[, , ], [, cos(rotationAngle), sin(rotationAngle)], [,-sin(rotationAngle), cos(rotationAngle)]])elif (axis==\"\" or axis==\"\"):return array([[cos(rotationAngle), , -sin(rotationAngle)], [, , ], [sin(rotationAngle),, cos(rotationAngle)]])elif (axis==\"\" or axis==\"\"):return array([[cos(rotationAngle), sin(rotationAngle), ], [-sin(rotationAngle),cos(rotationAngle), ], [, , ]])else:raise Exception(\"\"+axis+\"\")", "docstring": "Construct an elementary rotation matrix describing a rotation around the x, y, or z-axis.\n\nParameters\n----------\n\naxis - Axis around which to rotate (\"x\", \"y\", or \"z\")\nrotationAngle - the 
rotation angle in radians\n\nReturns\n-------\n\nThe rotation matrix\n\nExample usage\n-------------\n\nrotmat = elementaryRotationMatrix(\"y\", pi/6.0)", "id": "f227:m3"} {"signature": "def sphericalToCartesian(r, phi, theta):", "body": "ctheta=cos(theta)x=r*cos(phi)*cthetay=r*sin(phi)*cthetaz=r*sin(theta)return x, y, z", "docstring": "Convert spherical to Cartesian coordinates. The input can be scalars or 1-dimensional numpy arrays.\nNote that the angle coordinates follow the astronomical convention of using elevation (declination,\nlatitude) rather than its complement (pi/2-elevation), where the latter is commonly used in the\nmathematical treatment of spherical coordinates.\n\nParameters\n----------\n\nr - length of input Cartesian vector.\nphi - longitude-like angle (e.g., right ascension, ecliptic longitude) in radians\ntheta - latitide-like angle (e.g., declination, ecliptic latitude) in radians\n\nReturns\n-------\n\nThe Cartesian vector components x, y, z", "id": "f227:m0"} {"signature": "def _generateRandomCovarianceMatrices(self, num):", "body": "varDiag1 = rand(num)*+varDiag2 = rand(num)*+rotAngle = *pi*rand(num)sigma1 = zeros(num)sigma2 = zeros(num)rho12 = zeros(num)for i in range(num): rotationMatrix=array([[cos(rotAngle[i]), sin(rotAngle[i])], [-sin(rotAngle[i]), cos(rotAngle[i])]])covMatrix = dot(rotationMatrix,dot(diag([varDiag1[i],varDiag2[i]]),transpose(rotationMatrix)))sigma1[i] = sqrt(covMatrix[,])sigma2[i] = sqrt(covMatrix[,])rho12[i] = covMatrix[,]/(sigma1[i]*sigma2[i])return sigma1, sigma2, rho12", "docstring": "Generate random covariance matrices through the transformation of diagonal positive definite\nmatrices.\n\nParameters\n----------\n\nnum - Number of matrices to generate.\n\nReturns\n-------\n\nsigma1 - Square root of variance of first variable.\nsigma2 - Square root of variance of second variable.\nrho12 - Correlation coefficient between errors on variables 1 and 2", "id": "f229:c0:m12"} {"signature": "def _is_pos_def(self, A):", "body": "if allclose(A, A.T, rtol=, atol=):try:cholesky(A)return Trueexcept LinAlgError:return Falseelse:return False", "docstring": "Check if matrix A is positive definite. Code from\nhttps://stackoverflow.com/questions/16266720/find-out-if-matrix-is-positive-definite-with-numpy", "id": "f229:c0:m13"} {"signature": "def rpMagnitudeError(G, vmini):", "body": "z=calcZBpRp(G)a = -*power(vmini,) + *vmini*vmini - *vmini + b = -*power(vmini,) + *vmini*vmini - *vmini + c = -*power(vmini,) + *vmini*vmini - *vmini - return *sqrt(power(,a)*z*z+power(,b)*z+power(,c))", "docstring": "Calculate the single-field-of-view-transit photometric standard error in the RP band as a function\nof G and (V-I). Note: this refers to the integrated flux from the RP spectrophotometer. 
A margin of 20%\nis included.\n\nParameters\n----------\n\nG - Value(s) of G-band magnitude.\nvmini - Value(s) of (V-I) colour.\n\nReturns\n-------\n\nThe RP band photometric standard error in units of magnitude.", "id": "f235:m4"} {"signature": "def calcZBpRp(G):", "body": "gatefloor=power(,*(-))if isscalar(G):result=amax((gatefloor,power(,*(G-))))else :result=power(,*(G-))indices=(result<gatefloor)result[indices]=gatefloorreturn result", "docstring": "Calculate the value for the parameter z in the formula for the BP and RP magnitude errors as a\nfunction of G and (V-I).\n\nParameters\n----------\n\nG - Value of G-band magnitude.\n\nReturns\n-------\n\nValue of z for BP/RP.", "id": "f237:m1"} {"signature": "def vradErrorSkyAvg(vmag, spt):", "body": "return _vradCalibrationFloor + _vradErrorBCoeff[spt]*exp(_vradErrorACoeff[spt]*(vmag-_vradMagnitudeZeroPoint))", "docstring": "Calculate radial velocity error from V and the spectral type. The value of the error is an average over\nthe sky.\n\nParameters\n----------\n\nvmag - Value of V-band magnitude.\nspt - String representing the spectral type of the star.\n\nReturns\n-------\n\nThe radial velocity error in km/s.", "id": "f238:m0"} {"signature": "def positionMinError(G, vmini, extension=):", "body": "parallaxError = parallaxErrorSkyAvg(G, vmini, extension=extension)return _astrometricErrorFactors[''].min()*parallaxError,_astrometricErrorFactors[''].min()*parallaxError", "docstring": "Calculate the minimum position errors from G and (V-I). These correspond to the sky regions with the\nsmallest astrometric errors.\n\nNOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). MAKE SURE YOUR\nSIMULATED ASTROMETRY IS ALSO ON THE ICRS.\n\nParameters\n----------\n\nG - Value(s) of G-band magnitude.\nvmini - Value(s) of (V-I) colour.\n\nKeywords\n--------\n\nextension - Add this amount of years to the mission lifetime and scale the errors accordingly.\n\nReturns\n-------\n\nThe minimum error in alpha* and the error in delta, in that order, in micro-arcsecond.", "id": "f239:m8"} {"signature": "def errorScalingMissionLength(extension, p):", "body": "return power((_nominalLifeTime+extension)/_nominalLifeTime, p)", "docstring": "Calculate the factor by which to scale errors for a given Gaia mission extension.\n\nParameters\n----------\n\nextension - The mission extension in years (a negative extension can be used to make crude\nperformance predictions part-way through the mission lifetime).\np - The power by which the errors scale with time (error ~ t^p, p=-0.5 for parallax and celestial\nposition, and -1.5 for proper motion).\n\nReturns\n-------\n\nThe factor by which to scale the errors", "id": "f239:m1"} {"signature": "def positionMaxError(G, vmini, extension=):", "body": "parallaxError = parallaxErrorSkyAvg(G, vmini, extension)return _astrometricErrorFactors[''].max()*parallaxError,_astrometricErrorFactors[''].max()*parallaxError", "docstring": "Calculate the maximum position errors from G and (V-I). These correspond to the sky regions with the\nlargest astrometric errors.\n\nNOTE! THE ERRORS ARE FOR SKY POSITIONS IN THE ICRS (I.E., RIGHT ASCENSION, DECLINATION). 
MAKE SURE YOUR\nSIMULATED ASTROMETRY IS ALSO ON THE ICRS.\n\nParameters\n----------\n\nG - Value(s) of G-band magnitude.\nvmini - Value(s) of (V-I) colour.\n\nKeywords\n--------\n\nextension - Add this amount of years to the mission lifetime and scale the errors accordingly.\n\nReturns\n-------\n\nThe maximum error in alpha* and the error in delta, in that order, in micro-arcsecond.", "id": "f239:m9"} {"signature": "def parallaxMaxError(G, vmini, extension=):", "body": "errors = _astrometricErrorFactors[\"\"].max()*parallaxErrorSkyAvg(G, vmini, extension=extension)indices = (errors<_parallaxErrorMaxBright)errors[indices]=_parallaxErrorMaxBrightreturn errors", "docstring": "Calculate the maximum parallax error from G and (V-I). This correspond to the sky regions with the\nlargest astrometric errors. At the bright end the parallax error is at least 14 muas due to the\ngating scheme.\n\nParameters\n----------\n\nG - Value(s) of G-band magnitude.\nvmini - Value(s) of (V-I) colour.\n\nKeywords\n--------\n\nextension - Add this amount of years to the mission lifetime and scale the errors accordingly.\n\nReturns\n-------\n\nThe maximum parallax error in micro-arcseconds.", "id": "f239:m4"} {"signature": "def parallaxMinError(G, vmini, extension=):", "body": "return _astrometricErrorFactors[\"\"].min()*parallaxErrorSkyAvg(G, vmini, extension=extension)", "docstring": "Calculate the minimum parallax error from G and (V-I). This correspond to the sky regions with the\nsmallest astrometric errors. At the bright end the parallax error is at least 14 muas due to the\ngating scheme.\n\nParameters\n----------\n\nG - Value(s) of G-band magnitude.\nvmini - Value(s) of (V-I) colour.\n\nKeywords\n--------\n\nextension - Add this amount of years to the mission lifetime and scale the errors accordingly.\n\nReturns\n-------\n\nThe minimum parallax error in micro-arcseconds.", "id": "f239:m3"} {"signature": "def degreesToRadians(angle):", "body": "return angle/*np.pi", "docstring": "Convert from degrees to radians.\n\nParameters\n----------\n\nangle - angle in degrees\n\nReturns\n-------\n\nAngle in radians.", "id": "f240:m1"} {"signature": "def parseCommandLineArguments():", "body": "parser = argparse.ArgumentParser(description=\"\"\"\"\"\")parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")args=vars(parser.parse_args())return args", "docstring": "Set up command line parsing.", "id": "f244:m1"} {"signature": "def makePlot(pdf=False, png=False):", "body": "logdistancekpc = np.linspace(-,np.log10(),)sptVabsAndVmini=OrderedDict([('',(,)), ('',(,)), ('',(,)),('',(,)), ('',(,)), ('',(,))])lines={}fig=plt.figure(figsize=(,))currentAxis=plt.gca()for spt in sptVabsAndVmini.keys():vmag=sptVabsAndVmini[spt][]+*logdistancekpc+indices=(vmag>) & (vmag<)gmag=vmag+gminvFromVmini(sptVabsAndVmini[spt][])parerrors=parallaxErrorSkyAvg(gmag,sptVabsAndVmini[spt][])relparerrors=parerrors***logdistancekpc/plt.loglog(**logdistancekpc, relparerrors,'',lw=)plt.loglog(**logdistancekpc[indices], relparerrors[indices],'',label=spt)plt.xlim(,)plt.ylim(,)plt.text(, ,'',horizontalalignment='',verticalalignment='',transform = currentAxis.transAxes)plt.legend(loc=)plt.xlabel('')plt.ylabel('')plt.grid(which='')if (args['']):plt.savefig('')elif (args['']):plt.savefig('')else:plt.show()", "docstring": "Plot relative parallax errors as a function of distance for stars of a given spectral type.\n\nParameters\n----------\n\nargs - command line arguments", "id": "f244:m0"} 
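The makePlot entry directly above (f244:m0) turns the sky-averaged parallax error into a relative parallax error as a function of distance: with the parallax in micro-arcseconds equal to 1000/d[kpc], the relative error is simply sigma_parallax[muas] * d[kpc] / 1000. A minimal sketch of that step, assuming parallaxErrorSkyAvg(G, V-I) returns the sky-averaged parallax error in micro-arcseconds and gminvFromVmini(V-I) returns the (G-V) colour term, as the entries above document; the import paths follow PyGaia's layout and the helper name relative_parallax_error is hypothetical, not part of the source:

import numpy as np
# Assumed import locations (PyGaia-style layout); adjust to the actual package.
from pygaia.errors.astrometric import parallaxErrorSkyAvg
from pygaia.photometry.transformations import gminvFromVmini

def relative_parallax_error(distance_kpc, vabs, vmini):
    # Apparent V magnitude: V = M_V + 5*log10(d_pc) - 5 = M_V + 5*log10(d_kpc) + 10.
    vmag = vabs + 5.0 * np.log10(distance_kpc) + 10.0
    # G-band magnitude via the (G-V) versus (V-I) relation.
    gmag = vmag + gminvFromVmini(vmini)
    # Sky-averaged end-of-mission parallax error in micro-arcseconds.
    sigma_plx = parallaxErrorSkyAvg(gmag, vmini)
    # parallax[muas] = 1000/d[kpc], so the relative error is sigma * d / 1000.
    return sigma_plx * distance_kpc / 1000.0

Evaluated on a grid of distances for a given absolute magnitude and (V-I) colour, this reproduces the per-spectral-type curves that makePlot draws.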
{"signature": "def parseCommandLineArguments():", "body": "parser = argparse.ArgumentParser(description=\"\")parser.add_argument(\"\", dest=\"\", type=float, help=\"\", default=)parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")args=vars(parser.parse_args())return args", "docstring": "Set up command line parsing.", "id": "f246:m1"} {"signature": "def makePlot(args):", "body": "gmag=np.linspace(,,)vmini = args['']vmag=gmag-gminvFromVmini(vmini)if args['']:sigmaG = gMagnitudeErrorEoM(gmag)sigmaGBp = bpMagnitudeErrorEoM(gmag, vmini)sigmaGRp = rpMagnitudeErrorEoM(gmag, vmini)yminmax = (-,)else:sigmaG = gMagnitudeError(gmag)sigmaGBp = bpMagnitudeError(gmag, vmini)sigmaGRp = rpMagnitudeError(gmag, vmini)yminmax = (-,)fig=plt.figure(figsize=(,))if (args['']):plt.semilogy(vmag, sigmaG, '', label='')plt.semilogy(vmag, sigmaGBp, '', label=''+''.format(vmini))plt.semilogy(vmag, sigmaGRp, '', label=''+''.format(vmini))plt.xlim((,))plt.legend(loc=)plt.xlabel('')else:ax=fig.add_subplot()plt.semilogy(gmag, sigmaG, '', label='')plt.semilogy(gmag, sigmaGBp, '', label=''+''.format(vmini))plt.semilogy(gmag, sigmaGRp, '', label=''+''.format(vmini))plt.xlim((,))plt.legend(loc=)plt.xlabel('')plt.xticks(np.arange(,,))ax = plt.gca().yaxis plt.grid(which='')plt.ylabel('')if args['']:plt.title(''.format(vmini), fontsize=)else:plt.title(''.format(vmini), fontsize=)basename = ''if (args['']):plt.savefig(basename+'')elif (args['']):plt.savefig(basename+'')else:plt.show()", "docstring": "Make the plot with photometry performance predictions.\n\n:argument args: command line arguments", "id": "f248:m0"} {"signature": "def makePlot(gmag, pdf=False, png=False, rvs=False):", "body": "vmini = np.linspace(-,,)if (rvs):gminv = -vminGrvsFromVmini(vmini)else:gminv = gminvFromVmini(vmini)mvlimit100pc = gmag-*np.log10()+-gminvmvlimit1kpc = gmag-*np.log10()+-gminvmvlimit10kpc = gmag-*np.log10()+-gminvfig=plt.figure(figsize=(,))plt.plot(vmini,mvlimit100pc,'')plt.text(vmini[]-,mvlimit100pc[],\"\", horizontalalignment='', va='')plt.plot(vmini,mvlimit1kpc,'')plt.text(vmini[]-,mvlimit1kpc[],\"\", horizontalalignment='', va='')plt.plot(vmini,mvlimit10kpc,'')plt.text(vmini[]-,mvlimit10kpc[],\"\", horizontalalignment='', va='')ax=plt.gca()ax.set_ylim(ax.get_ylim()[::-])plt.xlabel(\"\")plt.ylabel(\"\")if (rvs):plt.title(\"\"+\"\".format(gmag))else:plt.title(\"\".format(gmag))if (args['']):plt.savefig('')elif (args['']):plt.savefig('')else:plt.show()", "docstring": "Make a plot of a Mv vs (V-I) colour magnitude diagram containing lines of constant distance for stars\nat G=20. 
This will give an idea of the reach of Gaia.\n\nParameters\n----------\n\nargs - command line arguments", "id": "f249:m0"} {"signature": "def parseCommandLineArguments():", "body": "parser = argparse.ArgumentParser(description=\"\"\"\"\"\")parser.add_argument(\"\", help=\"\", type=float)parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")args=vars(parser.parse_args())return args", "docstring": "Set up command line parsing.", "id": "f249:m1"} {"signature": "def makePlot(args):", "body": "gRvs=np.linspace(,,)spts=['', '', '', '', '', '','', '', '', '', '']fig=plt.figure(figsize=(,))deltaHue = /(len(spts)-)hsv=np.zeros((,,))hsv[,,]=hsv[,,]=count=for spt in spts:hsv[,,]=(-count*deltaHue)/vmag = vminGrvsFromVmini(vminiFromSpt(spt)) + gRvsvradErrors = vradErrorSkyAvg(vmag, spt)plt.plot(vmag, vradErrors, '', label=spt, color=hsv_to_rgb(hsv)[,,:])count+=plt.grid(which='')plt.xlim(,)plt.ylim(,)plt.xticks(np.arange(,,))plt.yticks(np.arange(,,))plt.xlabel('')plt.ylabel('')leg=plt.legend(loc=, handlelength=, labelspacing=)for t in leg.get_texts():t.set_fontsize()if (args['']):plt.savefig('')elif (args['']):plt.savefig('')else:plt.show()", "docstring": "Make the plot with radial velocity performance predictions.\n\n:argument args: command line arguments", "id": "f250:m0"} {"signature": "def parseCommandLineArguments():", "body": "parser = argparse.ArgumentParser(description=\"\")parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")parser.add_argument(\"\", action=\"\", dest=\"\", help=\"\")args=vars(parser.parse_args())return args", "docstring": "Set up command line parsing.", "id": "f250:m1"} {"signature": "def makePlot(args):", "body": "gmag=np.linspace(,,)vminiB1V=vminiFromSpt('')vminiG2V=vminiFromSpt('')vminiM6V=vminiFromSpt('')vmagB1V=gmag-gminvFromVmini(vminiB1V)vmagG2V=gmag-gminvFromVmini(vminiG2V)vmagM6V=gmag-gminvFromVmini(vminiM6V)sigmualphaB1V, sigmudeltaB1V = properMotionErrorSkyAvg(gmag,vminiB1V)sigmuB1V = np.sqrt(*sigmualphaB1V**+*sigmudeltaB1V**)sigmualphaB1V, sigmudeltaB1V = properMotionMinError(gmag,vminiB1V)sigmuB1Vmin = np.sqrt(*sigmualphaB1V**+*sigmudeltaB1V**)sigmualphaB1V, sigmudeltaB1V = properMotionMaxError(gmag,vminiB1V)sigmuB1Vmax = np.sqrt(*sigmualphaB1V**+*sigmudeltaB1V**)sigmualphaG2V, sigmudeltaG2V = properMotionErrorSkyAvg(gmag,vminiG2V)sigmuG2V = np.sqrt(*sigmualphaG2V**+*sigmudeltaG2V**)sigmualphaG2V, sigmudeltaG2V = properMotionMinError(gmag,vminiG2V)sigmuG2Vmin = np.sqrt(*sigmualphaG2V**+*sigmudeltaG2V**)sigmualphaG2V, sigmudeltaG2V = properMotionMaxError(gmag,vminiG2V)sigmuG2Vmax = np.sqrt(*sigmualphaG2V**+*sigmudeltaG2V**)sigmualphaM6V, sigmudeltaM6V = properMotionErrorSkyAvg(gmag,vminiM6V)sigmuM6V = np.sqrt(*sigmualphaM6V**+*sigmudeltaM6V**)sigmualphaM6V, sigmudeltaM6V = properMotionMinError(gmag,vminiM6V)sigmuM6Vmin = np.sqrt(*sigmualphaM6V**+*sigmudeltaM6V**)sigmualphaM6V, sigmudeltaM6V = properMotionMaxError(gmag,vminiM6V)sigmuM6Vmax = np.sqrt(*sigmualphaM6V**+*sigmudeltaM6V**)fig=plt.figure(figsize=(,))if (args['']):plt.semilogy(gmag, sigmuB1V, '', label='')plt.semilogy(gmag, sigmuG2V, '', label='')plt.semilogy(gmag, sigmuM6V, '', label='')plt.xlim((,))plt.ylim((,))plt.legend(loc=)else:ax=fig.add_subplot()plt.semilogy(vmagB1V, sigmuB1V, '', label='')plt.semilogy(vmagM6V, sigmuM6V, '', label='')plt.fill_between(vmagB1V, sigmuB1Vmin, sigmuB1Vmax, color='', 
alpha=)plt.fill_between(vmagM6V, sigmuM6Vmin, sigmuM6Vmax, color='', alpha=)plt.xlim((,))plt.ylim((,))plt.text(,,'',color='')plt.text(,,'',color='')plt.text(,,'', size=, bbox=dict(boxstyle=\"\",ec=(, , ),fc=(, , ),))plt.text(,,'', rotation=, size=, bbox=dict(boxstyle=\"\",ec=(, , ),fc=(, , ),))ax.annotate('', xy=(, ), xycoords='',xytext=(,), textcoords='', ha='', size='',bbox=dict(boxstyle=\"\",ec=(,,),fc=(,,)),arrowprops=dict(facecolor='', shrink=, width=,headwidth=),horizontalalignment='', verticalalignment='',)ax.annotate('', xy=(, ), xycoords='',xytext=(,), textcoords='', ha='', size='',arrowprops=dict(facecolor='', shrink=, width=,headwidth=),horizontalalignment='', verticalalignment='',)plt.xticks(np.arange(,,))ax = plt.gca().yaxis ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())plt.ticklabel_format(axis='',style='')plt.grid(which='')plt.xlabel('')plt.ylabel('')basename = ''if (args['']):plt.savefig(basename+'')elif (args['']):plt.savefig(basename+'')else:plt.show()", "docstring": "Make the plot with proper motion performance predictions. The predictions are for the TOTAL proper\nmotion under the assumption of equal components mu_alpha* and mu_delta.\n\n:argument args: command line arguments", "id": "f251:m0"} {"signature": "def parseCommandLineArguments():", "body": "parser = argparse.ArgumentParser(description=\"\")parser.add_argument(\"\", help=\"\", type=float)parser.add_argument(\"\", help=\"\", type=float)args=vars(parser.parse_args())return args", "docstring": "Set up command line parsing.", "id": "f253:m1"} {"signature": "@gen.coroutine@query_params('', '', '','', '', '','', '', '','', '', '','', '', '','', '', '', '')def mlt(self, index, doc_type, id, body=None, params=None):", "body": "_, data = yield self.transport.perform_request('', _make_path(index, doc_type, id, ''),params=params, body=body)raise gen.Return(data)", "docstring": "Get documents that are \"like\" a specified document.\n``_\n\n:arg index: The name of the index\n:arg doc_type: The type of the document (use `_all` to fetch the first\n document matching the ID across all types)\n:arg id: The document ID\n:arg body: A specific search request definition\n:arg boost_terms: The boost factor\n:arg max_doc_freq: The word occurrence frequency as count: words with\n higher occurrence in the corpus will be ignored\n:arg max_query_terms: The maximum query terms to be included in the\n generated query\n:arg max_word_len: The minimum length of the word: longer words will\n be ignored\n:arg min_doc_freq: The word occurrence frequency as count: words with\n lower occurrence in the corpus will be ignored\n:arg min_term_freq: The term frequency as percent: terms with lower\n occurrence in the source document will be ignored\n:arg min_word_len: The minimum length of the word: shorter words will\n be ignored\n:arg mlt_fields: Specific fields to perform the query against\n:arg percent_terms_to_match: How many terms have to match in order to\n consider the document a match (default: 0.3)\n:arg routing: Specific routing value\n:arg search_from: The offset from which to return results\n:arg search_indices: A comma-separated list of indices to perform the\n query against (default: the index containing the document)\n:arg search_query_hint: The search query hint\n:arg search_scroll: A scroll search request definition\n:arg search_size: The number of documents to return (default: 10)\n:arg search_source: A specific search request definition (instead of\n using the request body)\n:arg search_type: Specific search type (eg. 
`dfs_then_fetch`, `count`,\n etc)\n:arg search_types: A comma-separated list of types to perform the query\n against (default: the same type as the document)\n:arg stop_words: A list of stop words to be ignored", "id": "f255:c2:m24"} {"signature": "@gen.coroutine@query_params('', '', '', '','')def count(self, index=None, doc_type=None, body=None, params=None):", "body": "_, data = yield self.transport.perform_request('',_make_path(index,doc_type,''),params=params, body=body)raise gen.Return(data)", "docstring": "Execute a query and get the number of matches for that query.\n``_\n\n:arg index: A comma-separated list of indices to restrict the results\n:arg doc_type: A comma-separated list of types to restrict the results\n:arg body: A query to restrict the results (optional)\n:arg ignore_indices: When performed on multiple indices, allows to\n ignore `missing` ones (default: none)\n:arg min_score: Include only documents with a specific `_score` value\n in the result\n:arg preference: Specify the node or shard the operation should be\n performed on (default: random)\n:arg routing: Specific routing value\n:arg source: The URL-encoded query definition (instead of using the\n request body)", "id": "f255:c2:m17"} {"signature": "@gen.coroutine@query_params('', '', '','', '', '','', '', '', '','', '', '', '', '')def explain(self, index, doc_type, id, body=None, params=None):", "body": "_, data = yield self.transport.perform_request('',_make_path(index,doc_type, id,''),params=params, body=body)raise gen.Return(data)", "docstring": "The explain api computes a score explanation for a query and a specific\ndocument. This can give useful feedback whether a document matches or\ndidn't match a specific query.\n``_\n\n:arg index: The name of the index\n:arg doc_type: The type of the document\n:arg id: The document ID\n:arg body: The query definition using the Query DSL\n:arg _source: True or false to return the _source field or not, or a\n list of fields to return\n:arg _source_exclude: A list of fields to exclude from the returned\n _source field\n:arg _source_include: A list of fields to extract and return from the\n _source field\n:arg analyze_wildcard: Specify whether wildcards and prefix queries in\n the query string query should be analyzed (default: false)\n:arg analyzer: The analyzer for the query string query\n:arg default_operator: The default operator for query string query (AND\n or OR), (default: OR)\n:arg df: The default field for query string query (default: _all)\n:arg fields: A comma-separated list of fields to return in the response\n:arg lenient: Specify whether format-based query failures (such as\n providing text to a numeric field) should be ignored\n:arg lowercase_expanded_terms: Specify whether query terms should be\n lowercased\n:arg parent: The ID of the parent document\n:arg preference: Specify the node or shard the operation should be\n performed on (default: random)\n:arg q: Query in the Lucene query string syntax\n:arg routing: Specific routing value\n:arg source: The URL-encoded query definition (instead of using the\n request body)", "id": "f255:c2:m13"} {"signature": "@gen.coroutine@query_params()def ping(self, params=None):", "body": "try:self.transport.perform_request('', '', params=params)except TransportError:raise gen.Return(False)raise gen.Return(True)", "docstring": "Returns True if the cluster is up, False otherwise.", "id": "f255:c2:m1"} {"signature": "@gen.coroutine@query_params('')def msearch(self, body, index=None, doc_type=None, params=None):", "body": "_, 
data = yield self.transport.perform_request('',_make_path(index,doc_type,''),params=params,body=self._bulk_body(body))raise gen.Return(data)", "docstring": "Execute several search requests within the same API.\n``_\n\n:arg body: The request definitions (metadata-search request definition\n pairs), separated by newlines\n:arg index: A comma-separated list of index names to use as default\n:arg doc_type: A comma-separated list of document types to use as default\n:arg search_type: Search operation type", "id": "f255:c2:m19"} {"signature": "@gen.coroutine@query_params('', '', '', '', '','', '', '', '', '','', '')def index(self, index, doc_type, body, id=None, params=None):", "body": "_, data = yield self.transport.perform_request('' if id else '', _make_path(index, doc_type, id),params=params, body=body)raise gen.Return(data)", "docstring": "Adds or updates a typed JSON document in a specific index, making it\nsearchable. ``_\n\n:arg index: The name of the index\n:arg doc_type: The type of the document\n:arg body: The document\n:arg id: Document ID\n:arg consistency: Explicit write consistency setting for the operation\n:arg op_type: Explicit operation type (default: index)\n:arg parent: ID of the parent document\n:arg percolate: Percolator queries to execute while indexing the doc\n:arg refresh: Refresh the index after performing the operation\n:arg replication: Specific replication type (default: sync)\n:arg routing: Specific routing value\n:arg timeout: Explicit operation timeout\n:arg timestamp: Explicit timestamp for the document\n:arg ttl: Expiration time for the document\n:arg version: Explicit version number for concurrency control\n:arg version_type: Specific version type", "id": "f255:c2:m5"} {"signature": "@gen.coroutine@query_params('', '', '')def bulk(self, body, index=None, doc_type=None, params=None):", "body": "_, data = yield self.transport.perform_request('',_make_path(index,doc_type,''),params=params,body=self._bulk_body(body))raise gen.Return(data)", "docstring": "Perform many index/delete operations in a single API call.\n``_\n\nSee the :func:`~elasticsearch.helpers.bulk_index` for a more friendly\nAPI.\n\n:arg body: The operation definition and data (action-data pairs)\n:arg index: Default index for items which don't provide one\n:arg doc_type: Default document type for items which don't provide one\n:arg consistency: Explicit write consistency setting for the operation\n:arg refresh: Refresh the index after performing the operation\n:arg replication: Explicitly set the replication type (efault: sync)", "id": "f255:c2:m18"} {"signature": "@gen.coroutine@query_params('', '', '','')def get_mapping(self, index=None, doc_type=None, params=None):", "body": "_, data = yield self.transport.perform_request('',_make_path(index,'',doc_type),params=params)raise gen.Return(data)", "docstring": "Retrieve mapping definition of index or index/type.\n``_\n:arg index: A comma-separated list of index names\n:arg doc_type: A comma-separated list of document types\n:arg allow_no_indices: Whether to ignore if a wildcard indices\n expression resolves into no concrete indices. 
(This includes `_all`\n string or when no indices have been specified)\n:arg expand_wildcards: Whether to expand wildcard expression to concrete\n indices that are open, closed or both., default 'open', valid\n choices are: 'open', 'closed', 'none', 'all'\n:arg ignore_unavailable: Whether specified concrete indices should be\n ignored when unavailable (missing or closed)\n:arg local: Return local information, do not retrieve the state from\n master node (default: false)", "id": "f255:c2:m21"} {"signature": "def recommendations(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the recommendations for TV series for a specific TV series id.\n\nArgs:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f272:c0:m9"} {"signature": "def rating(self, **kwargs):", "body": "path = self._get_id_path('')payload = {'': kwargs.pop('', None),}response = self._POST(path, kwargs, payload)self._set_attrs_to_values(response)return response", "docstring": "This method lets users rate a TV show. A valid session id or guest\nsession id is required.\n\nArgs:\n session_id: see Authentication.\n guest_session_id: see Authentication.\n value: Rating value.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f272:c0:m7"} {"signature": "def credits(self, **kwargs):", "body": "path = self._get_series_id_season_number_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the cast & crew credits for a TV season by season number.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f272:c1:m2"} {"signature": "def translations(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the list of translations that exist for a TV series. These\ntranslations cascade down to the episode level.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f272:c0:m10"} {"signature": "def images(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the images (posters and backdrops) for a TV series.\n\nArgs:\n language: (optional) ISO 639 code.\n include_image_language: (optional) Comma separated, a valid\n ISO 69-1.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f272:c0:m6"} {"signature": "def external_ids(self, **kwargs):", "body": "path = self._get_series_id_season_number_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the external ids that we have stored for a TV season by season\nnumber.\n\nArgs:\n language: (optional) ISO 639 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f272:c1:m3"} {"signature": "def airing_today(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the list of TV shows that air today. 
Without a specified timezone,\nthis query defaults to EST (Eastern Time UTC-05:00).\n\nArgs:\n page: (optional) Minimum 1, maximum 1000.\n language: (optional) ISO 639 code.\n timezone: (optional) Valid value from the list of timezones.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f272:c0:m14"} {"signature": "def collection(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Search for collections by name.\n\nArgs:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f275:c0:m1"} {"signature": "def multi(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Search the movie, tv show and person collections with a single query.\n\nArgs:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n include_adult: (optional) Toggle the inclusion of adult titles.\n Expected value is True or False.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f275:c0:m6"} {"signature": "def movie(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Search for movies by title.\n\nArgs:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n include_adult: (optional) Toggle the inclusion of adult titles. \n Expected value is True or False.\n year: (optional) Filter the results release dates to matches that \n include this value.\n primary_release_year: (optional) Filter the results so that only \n the primary release dates have this value.\n search_type: (optional) By default, the search type is 'phrase'. \n This is almost guaranteed the option you will want. \n It's a great all purpose search type and by far the \n most tuned for every day querying. For those wanting \n more of an \"autocomplete\" type search, set this \n option to 'ngram'.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f275:c0:m0"} {"signature": "def keyword(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Search for keywords by name.\n\nArgs:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. Expected value is an integer.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f275:c0:m5"} {"signature": "def person(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Search for people by name.\n\nArgs:\n query: CGI escpaed string.\n page: (optional) Minimum value of 1. Expected value is an integer.\n include_adult: (optional) Toggle the inclusion of adult titles. \n Expected value is True or False.\n search_type: (optional) By default, the search type is 'phrase'. \n This is almost guaranteed the option you will want. \n It's a great all purpose search type and by far the \n most tuned for every day querying. 
For those wanting \n more of an \"autocomplete\" type search, set this \n option to 'ngram'.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f275:c0:m3"} {"signature": "def images(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get all of the images for a particular collection by collection id.\n\nArgs:\n language: (optional) ISO 639-1 code.\n append_to_response: (optional) Comma separated, any movie method.\n include_image_language: (optional) Comma separated, a valid\n ISO 69-1.\n\nReturns:\n A dict representation of the JSON returned from the API.", "id": "f276:c1:m2"} {"signature": "def movies(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the list of movies associated with a particular company.\n\nArgs:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n append_to_response: (optional) Comma separated, any movie method.\n\nReturns:\n A dict representation of the JSON returned from the API.", "id": "f276:c2:m2"} {"signature": "def info(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the full details of a review by ID.\n\nReturns:\n A dict representation of the JSON returned from the API.", "id": "f276:c4:m1"} {"signature": "def now_playing(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the list of movies playing in theatres. This list refreshes\nevery day. The maximum number of items this list will include is 100.\n\nArgs:\n page: (optional) Minimum value of 1. Expected value is an integer.\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict representation of the JSON returned from the API.", "id": "f276:c0:m18"} {"signature": "def info(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "This method is used to retrieve all of the basic information about a\ncompany.\n\nArgs:\n append_to_response: (optional) Comma separated, any movie method.\n\nReturns:\n A dict representation of the JSON returned from the API.", "id": "f276:c2:m1"} {"signature": "def alternative_titles(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the alternative titles for a specific movie id.\n\nArgs:\n country: (optional) ISO 3166-1 code.\n append_to_response: (optional) Comma separated, any movie method.\n\nReturns:\n A dict representation of the JSON returned from the API.", "id": "f276:c0:m2"} {"signature": "def rating(self, **kwargs):", "body": "path = self._get_id_path('')payload = {'': kwargs.pop('', None),}response = self._POST(path, kwargs, payload)self._set_attrs_to_values(response)return response", "docstring": "This method lets users rate a movie. 
A valid session id or guest\nsession id is required.\n\nArgs:\n session_id: see Authentication.\n guest_session_id: see Authentication.\n value: Rating value.\n\nReturns:\n A dict representation of the JSON returned from the API.", "id": "f276:c0:m22"} {"signature": "def popular(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the list of popular people on The Movie Database. This list \nrefreshes every day.\n\nArgs:\n page: (optional) Minimum 1, maximum 1000.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f277:c0:m8"} {"signature": "def info(self, **kwargs):", "body": "path = self._get_credit_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the detailed information about a particular credit record. This is \ncurrently only supported with the new credit model found in TV. These \nids can be found from any TV credit response as well as the tv_credits \nand combined_credits methods for people.\n\nThe episodes object returns a list of episodes and are generally going \nto be guest stars. The season array will return a list of season \nnumbers. Season credits are credits that were marked with the \n\"add to every season\" option in the editing interface and are \nassumed to be \"season regulars\".\n\nArgs:\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f277:c1:m1"} {"signature": "def tv_credits(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the TV credits for a specific person id.\n\nArgs:\n language: (optional) ISO 639-1 code.\n append_to_response: (optional) Comma separated, any person method.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f277:c0:m3"} {"signature": "def list(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the list of supported timezones for the API methods that support\nthem.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f279:c2:m0"} {"signature": "def list(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the list of supported certifications for movies.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f279:c1:m0"} {"signature": "def favorite(self, **kwargs):", "body": "path = self._get_id_path('')kwargs.update({'': self.session_id})payload = {'': kwargs.pop('', None), '': kwargs.pop('', None), '': kwargs.pop('', None),}response = self._POST(path, kwargs, payload)self._set_attrs_to_values(response)return response", "docstring": "Add or remove a movie to an accounts favorite list.\n\nArgs:\n media_type: 'movie' | 'tv'\n media_id: The id of the media.\n favorite: True (to add) | False (to remove).\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f281:c0:m5"} {"signature": "def session_new(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Generate a session id for user based authentication.\n\nA session id is required in order to use any of 
the write methods.\n\nArgs:\n request_token: The token you generated for the user to approve.\n The token needs to be approved before being\n used here.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f281:c1:m2"} {"signature": "def add_item(self, **kwargs):", "body": "path = self._get_id_path('')kwargs.update({'': self.session_id})payload = {'': kwargs.pop('', None), }response = self._POST(path, kwargs, payload)self._set_attrs_to_values(response)return response", "docstring": "Add new movies to a list that the user created.\n\nA valid session id is required.\n\nArgs:\n media_id: A movie id.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f281:c3:m4"} {"signature": "def rated_movies(self, **kwargs):", "body": "path = self._get_id_path('')kwargs.update({'': self.session_id})response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the list of rated movies (and associated rating) for an account.\n\nArgs:\n page: (optional) Minimum 1, maximum 1000.\n sort_by: (optional) 'created_at.asc' | 'created_at.desc'\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f281:c0:m6"} {"signature": "def lists(self, **kwargs):", "body": "path = self._get_id_path('')kwargs.update({'': self.session_id})response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get the lists that you have created and marked as a favorite.\n\nArgs:\n page: (optional) Minimum 1, maximum 1000.\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f281:c0:m2"} {"signature": "def info(self, **kwargs):", "body": "path = self._get_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get a list by id.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f281:c3:m1"} {"signature": "def rated_movies(self, **kwargs):", "body": "path = self._get_guest_session_id_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get a list of rated moview for a specific guest session id.\n\nArgs:\n page: (optional) Minimum 1, maximum 1000.\n sort_by: (optional) 'created_at.asc' | 'created_at.desc'\n language: (optional) ISO 639-1 code.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f281:c2:m1"} {"signature": "def movie(self, **kwargs):", "body": "path = self._get_path('')response = self._GET(path, kwargs)self._set_attrs_to_values(response)return response", "docstring": "Get a list of movie ids that have been edited.\n\nArgs:\n page: (optional) Minimum 1, maximum 1000.\n start_date: (optional) Expected format is 'YYYY-MM-DD'.\n end_date: (optional) Expected format is 'YYYY-MM-DD'.\n\nReturns:\n A dict respresentation of the JSON returned from the API.", "id": "f282:c0:m0"} {"signature": "def send_json(self, ids=None):", "body": "items = ids or self._registration_idvalues = {\"\": items}if self._data is not None:values[\"\"] = self._datafor key, val in self._kwargs.items():if val:values[key] = valdata = json.dumps(values, separators=(\"\", \"\"), sort_keys=True).encode(self.encoding)result = json.loads(self._send(data, \"\"))if (\"\" in result) and (result[\"\"]):unregistered = []throw_error = Falsefor index, error in enumerate(result.get(\"\", [])):error = error.get(\"\", \"\")if error in (\"\", 
\"\"):unregistered.append(items[index])elif error != \"\":throw_error = Trueself.deactivate_unregistered_devices(unregistered)if throw_error:raise GCMPushError(result)return result", "docstring": "Sends a json GCM message", "id": "f293:c0:m4"} {"signature": "def update_widget_data(self):", "body": "raise NotImplementedError", "docstring": "Implement this in your widget in order to update the widget's data.\n\nThis is the place where you would call some third party API, retrieve\nsome data and save it into your widget's model.", "id": "f307:c0:m9"} {"signature": "def save_setting(self, setting_name, value):", "body": "setting = self.get_setting(setting_name)if setting is None:setting = models.DashboardWidgetSettings.objects.create(widget_name=self.get_name(),setting_name=setting_name,value=value)setting.value = valuesetting.save()return setting", "docstring": "Saves the setting value into the database.", "id": "f307:c0:m6"} {"signature": "def get_last_update(self):", "body": "instance, created =models.DashboardWidgetLastUpdate.objects.get_or_create(widget_name=self.get_name())return instance", "docstring": "Gets or creates the last update object for this widget.", "id": "f307:c0:m2"} {"signature": "def get_context_data(self):", "body": "return {'': True, }", "docstring": "Should return a dictionary of template context variables that are\nneeded to render this widget.", "id": "f307:c0:m1"} {"signature": "def get_name(self):", "body": "if hasattr(self, ''):return self.widget_namereturn self.__class__.__name__", "docstring": "Returns the class name of this widget.\n\nBe careful when overriding this. If ``self.widget_name`` is set, you\nshould always return that in order to allow to register this widget\nclass several times with different names.", "id": "f307:c0:m3"} {"signature": "def convert_context_to_json(self, context):", "body": "return json.dumps(context)", "docstring": "Convert the context dictionary into a JSON object", "id": "f314:c0:m2"} {"signature": "def render_to_response(self, context):", "body": "return self.get_json_response(self.convert_context_to_json(context))", "docstring": "Returns a JSON response containing 'context' as payload", "id": "f314:c0:m0"} {"signature": "def get_json_response(self, content, **httpresponse_kwargs):", "body": "return http.HttpResponse(content,content_type='',**httpresponse_kwargs)", "docstring": "Construct an `HttpResponse` object.", "id": "f314:c0:m1"} {"signature": "def permission_required(perm, login_url=None, raise_exception=False):", "body": "def check_perms(user):if not getattr(settings, '',app_settings.REQUIRE_LOGIN):return Trueif user.has_perm(perm):return Trueif raise_exception: raise PermissionDeniedreturn Falsereturn user_passes_test(check_perms, login_url=login_url)", "docstring": "Re-implementation of the permission_required decorator, honors settings.\n\nIf ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always return\n``True``, otherwise it will check for the permission as usual.", "id": "f316:m0"} {"signature": "def unregister_widget(self, widget_cls):", "body": "if widget_cls.__name__ in self.widgets:del self.widgets[widget_cls().get_name()]", "docstring": "Unregisters the given widget.", "id": "f317:c0:m7"} {"signature": "def set_occupancy_modes(self, index, auto_away=None, follow_me=None):", "body": "body = {'': {'': '','': self.thermostats[index]['']},'': {'': {'': auto_away,'': follow_me}}}log_msg_action = ''return self.make_request(body, log_msg_action)", "docstring": "Enable/disable Smart Home/Away and Follow Me 
modes\n Values: True, False", "id": "f322:c0:m20"} {"signature": "def send_message(self, index, message=\"\"):", "body": "body = {\"\": {\"\": \"\",\"\": self.thermostats[index]['']},\"\": [{\"\": \"\", \"\": {\"\": message[:]}}]}log_msg_action = \"\"return self.make_request(body, log_msg_action)", "docstring": "Send a message to the thermostat", "id": "f322:c0:m17"} {"signature": "def request_tokens(self):", "body": "url = ''params = {'': '', '': self.authorization_code,'': self.api_key}try:request = requests.post(url, params=params)except RequestException:logger.warn(\"\"\"\")returnif request.status_code == requests.codes.ok:self.access_token = request.json()['']self.refresh_token = request.json()['']self.write_tokens_to_file()self.pin = Noneelse:logger.warn('''' + str(request.status_code))return", "docstring": "Method to request API tokens from ecobee", "id": "f322:c0:m2"} {"signature": "def set_dst_mode(self, index, dst):", "body": "body = {'': {'': '','': self.thermostats[index]['']},'': {'': {'': dst}}}log_msg_action = ''return self.make_request(body, log_msg_action)", "docstring": "Enable/disable daylight savings\n Values: True, False", "id": "f322:c0:m21"} {"signature": "def set_humidity(self, index, humidity):", "body": "body = {\"\": {\"\": \"\",\"\": self.thermostats[index]['']},\"\": {\"\": {\"\": int(humidity)}}}log_msg_action = \"\"return self.make_request(body, log_msg_action)", "docstring": "Set humidity level", "id": "f322:c0:m18"} {"signature": "def delete_vacation(self, index, vacation):", "body": "body = {\"\": {\"\": \"\",\"\": self.thermostats[index]['']},\"\": [{\"\": \"\", \"\": {\"\": vacation}}]}log_msg_action = \"\"return self.make_request(body, log_msg_action)", "docstring": "Delete the vacation with name vacation", "id": "f322:c0:m15"} {"signature": "def get_fake_model(fields=None, model_base=PostgresModel, meta_options={}):", "body": "model = define_fake_model(fields, model_base, meta_options)class TestProject:def clone(self, *_args, **_kwargs):return self@propertydef apps(self):return selfclass TestMigration(migrations.Migration):operations = [HStoreExtension()]with connection.schema_editor() as schema_editor:migration_executor = MigrationExecutor(schema_editor.connection)migration_executor.apply_migration(TestProject(), TestMigration('', ''))schema_editor.create_model(model)return model", "docstring": "Creates a fake model to use during unit tests.", "id": "f325:m1"} {"signature": "@contextmanagerdef add_field(field, filters: List[str]):", "body": "model = define_fake_model()project = migrations.state.ProjectState.from_apps(apps)with connection.schema_editor() as schema_editor:execute_migration(schema_editor, [migrations.CreateModel(model.__name__,fields=[])], project)with filtered_schema_editor(*filters) as (schema_editor, calls):execute_migration(schema_editor, [migrations.AddField(model.__name__,'',field)], project)yield calls", "docstring": "Adds the specified field to a model.\n\n Arguments:\n field:\n The field to add to a model.\n\n filters:\n List of strings to filter\n SQL statements on.", "id": "f338:m4"} {"signature": "@contextmanagerdef create_drop_model(field, filters: List[str]):", "body": "model = define_fake_model({'': field})with filtered_schema_editor(*filters) as (schema_editor, calls):execute_migration(schema_editor, [migrations.CreateModel(model.__name__,fields=[('', field.clone())]),migrations.DeleteModel(model.__name__,)])yield calls", "docstring": "Creates and drops a model with the specified field.\n\n Arguments:\n field:\n The field 
to include on the\n model to create and drop.\n\n filters:\n List of strings to filter\n SQL statements on.", "id": "f338:m2"} {"signature": "@contextmanagerdef alter_field(old_field, new_field, filters: List[str]):", "body": "model = define_fake_model({'': old_field})project = migrations.state.ProjectState.from_apps(apps)with connection.schema_editor() as schema_editor:execute_migration(schema_editor, [migrations.CreateModel(model.__name__,fields=[('', old_field.clone())])], project)with filtered_schema_editor(*filters) as (schema_editor, calls):execute_migration(schema_editor, [migrations.AlterField(model.__name__,'',new_field)], project)yield calls", "docstring": "Alters a field from one state to the other.\n\n Arguments:\n old_field:\n The field before altering it.\n\n new_field:\n The field after altering it.\n\n filters:\n List of strings to filter\n SQL statements on.", "id": "f338:m6"} {"signature": "def _form_returning(self):", "body": "qn = self.connection.ops.quote_namereturn '' % qn(self.query.model._meta.pk.attname)", "docstring": "Builds the RETURNING part of the query.", "id": "f347:c0:m3"} {"signature": "def _rewrite_insert(self, sql, params, return_id=False):", "body": "returning = self.qn(self.query.model._meta.pk.attname) if return_id else ''if self.query.conflict_action.value == '':return self._rewrite_insert_update(sql, params, returning)elif self.query.conflict_action.value == '':return self._rewrite_insert_nothing(sql, params, returning)raise SuspiciousOperation(('''') % str(self.query.conflict_action))", "docstring": "Rewrites a formed SQL INSERT query to include\n the ON CONFLICT clause.\n\n Arguments:\n sql:\n The SQL INSERT query to rewrite.\n\n params:\n The parameters passed to the query.\n\n returning:\n What to put in the `RETURNING` clause\n of the resulting query.\n\n Returns:\n A tuple of the rewritten SQL query and new params.", "id": "f347:c1:m3"} {"signature": "def _get_model_field(self, name: str):", "body": "field_name = self._normalize_field_name(name)if field_name == '' and self.query.model._meta.pk:return self.query.model._meta.pkfor field in self.query.model._meta.local_concrete_fields:if field.name == field_name or field.column == field_name:return fieldreturn None", "docstring": "Gets the field on a model with the specified name.\n\n Arguments:\n name:\n The name of the field to look for.\n\n This can be both the actual field name, or\n the name of the column, both will work :)\n\n Returns:\n The field with the specified name or None if\n no such field exists.", "id": "f347:c1:m7"} {"signature": "def __init__(self, value):", "body": "self.value = value", "docstring": "Initializes a new instance.", "id": "f349:c0:m0"} {"signature": "def __init__(self, name: str, key: str):", "body": "super().__init__(name)self.key = key", "docstring": "Initializes a new instance of :see:HStoreRef.\n\n Arguments:\n name:\n The name of the column/field to resolve.\n\n key:\n The name of the HStore key to select.", "id": "f349:c2:m0"} {"signature": "def resolve_expression(self, *args, **kwargs):", "body": "result = dict()for key, value in self.value.items():if hasattr(value, ''):result[key] = value.resolve_expression(*args, **kwargs)else:result[key] = valuereturn HStoreValue(result)", "docstring": "Resolves expressions inside the dictionary.", "id": "f349:c0:m1"} {"signature": "def __init__(self, alias, target, hstore_key):", "body": "super().__init__(alias, target, output_field=target)self.alias, self.target, self.hstore_key = alias, target, hstore_key", 
"docstring": "Initializes a new instance of :see:HStoreColumn.\n\n Arguments:\n alias:\n The table name.\n\n target:\n The field instance.\n\n hstore_key\n The name of the hstore key to include\n in the epxression.", "id": "f349:c1:m0"} {"signature": "def __repr__(self):", "body": "return \"\".format(self.__class__.__name__,self.alias,self.target,self.hstore_key)", "docstring": "Gets a textual representation of this expresion.", "id": "f349:c1:m1"} {"signature": "def insert(self, **fields):", "body": "if self.conflict_target or self.conflict_action:compiler = self._build_insert_compiler([fields])rows = compiler.execute_sql(return_id=True)pk_field_name = self.model._meta.pk.namereturn rows[][pk_field_name]return super().create(**fields).pk", "docstring": "Creates a new record in the database.\n\n This allows specifying custom conflict behavior using .on_conflict().\n If no special behavior was specified, this uses the normal Django create(..)\n\n Arguments:\n fields:\n The fields of the row to create.\n\n Returns:\n The primary key of the record that was created.", "id": "f353:c0:m7"} {"signature": "def join(self, **conditions):", "body": "self.query.add_join_conditions(conditions)return self", "docstring": "Adds extra conditions to existing joins.\n\n WARNING: This is an extremely experimental feature.\n DO NOT USE unless you know what you're doing.", "id": "f353:c0:m3"} {"signature": "def upsert(self, conflict_target: List, fields: Dict, index_predicate: str=None) -> int:", "body": "return self.get_queryset().upsert(conflict_target, fields, index_predicate)", "docstring": "Creates a new record or updates the existing one\n with the specified data.\n\n Arguments:\n conflict_target:\n Fields to pass into the ON CONFLICT clause.\n\n fields:\n Fields to insert/update.\n\n index_predicate:\n The index predicate to satisfy an arbiter partial index.\n\n Returns:\n The primary key of the row that was created/updated.", "id": "f353:c1:m4"} {"signature": "def bulk_upsert(self, conflict_target: List, rows: List[Dict], index_predicate: str=None):", "body": "return self.get_queryset().bulk_upsert(conflict_target, rows, index_predicate)", "docstring": "Creates a set of new records or updates the existing\n ones with the specified data.\n\n Arguments:\n conflict_target:\n Fields to pass into the ON CONFLICT clause.\n\n index_predicate:\n The index predicate to satisfy an arbiter partial index.\n\n rows:\n Rows to upsert.", "id": "f353:c1:m6"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(PostgresManager, self).__init__(*args, **kwargs)db_backend = settings.DATABASES['']['']if '' not in db_backend:raise ImproperlyConfigured(('''''') % db_backend)django.db.models.signals.post_save.connect(self._on_model_save, sender=self.model, weak=False)django.db.models.signals.pre_delete.connect(self._on_model_delete, sender=self.model, weak=False)self._signals_connected = True", "docstring": "Initializes a new instance of :see:PostgresManager.", "id": "f353:c1:m0"} {"signature": "def upsert(self, conflict_target: List, fields: Dict, index_predicate: str=None) -> int:", "body": "self.on_conflict(conflict_target, ConflictAction.UPDATE, index_predicate)return self.insert(**fields)", "docstring": "Creates a new record or updates the existing one\n with the specified data.\n\n Arguments:\n conflict_target:\n Fields to pass into the ON CONFLICT clause.\n\n fields:\n Fields to insert/update.\n\n index_predicate:\n The index predicate to satisfy an arbiter partial index (i.e. 
what partial index to use for checking\n conflicts)\n\n Returns:\n The primary key of the row that was created/updated.", "id": "f353:c0:m9"} {"signature": "def on_conflict(self, fields: List[Union[str, Tuple[str]]], action, index_predicate: str=None):", "body": "return self.get_queryset().on_conflict(fields, action, index_predicate)", "docstring": "Sets the action to take when conflicts arise when attempting\n to insert/create a new row.\n\n Arguments:\n fields:\n The fields the conflicts can occur in.\n\n action:\n The action to take when the conflict occurs.\n\n index_predicate:\n The index predicate to satisfy an arbiter partial index.", "id": "f353:c1:m3"} {"signature": "def upsert_and_get(self, conflict_target: List, fields: Dict, index_predicate: str=None):", "body": "self.on_conflict(conflict_target, ConflictAction.UPDATE, index_predicate)return self.insert_and_get(**fields)", "docstring": "Creates a new record or updates the existing one\n with the specified data and then gets the row.\n\n Arguments:\n conflict_target:\n Fields to pass into the ON CONFLICT clause.\n\n fields:\n Fields to insert/update.\n\n index_predicate:\n The index predicate to satisfy an arbiter partial index (i.e. what partial index to use for checking\n conflicts)\n\n Returns:\n The model instance representing the row\n that was created/updated.", "id": "f353:c0:m10"} {"signature": "def insert_and_get(self, **fields):", "body": "if not self.conflict_target and not self.conflict_action:return super().create(**fields)compiler = self._build_insert_compiler([fields])rows = compiler.execute_sql(return_id=False)columns = rows[]model_columns = {}for field in self.model._meta.local_concrete_fields:model_columns[field.column] = field.attnamemodel_init_fields = {}for column_name, column_value in columns.items():try:model_init_fields[model_columns[column_name]] = column_valueexcept KeyError:passreturn self.model(**model_init_fields)", "docstring": "Creates a new record in the database and then gets\n the entire row.\n\n This allows specifying custom conflict behavior using .on_conflict().\n If no special behavior was specified, this uses the normal Django create(..)\n\n Arguments:\n fields:\n The fields of the row to create.\n\n Returns:\n The model instance representing the row that was created.", "id": "f353:c0:m8"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super().__init__(*args, **kwargs)self.join_type = ''self.extra_conditions = []", "docstring": "Initializes a new instance of :see:ConditionalJoin.", "id": "f356:c0:m0"} {"signature": "def __init__(self, condition: str, fields=[], name=None):", "body": "super().__init__(fields=fields, name=name)self.condition = condition", "docstring": "Initializes a new instance of :see:ConditionalUniqueIndex.", "id": "f359:c0:m0"} {"signature": "def delete_model(self, model):", "body": "for field in model._meta.local_fields:if not isinstance(field, HStoreField):continueself.remove_field(model, field)", "docstring": "Ran when a model is being deleted.", "id": "f361:c0:m1"} {"signature": "def alter_db_table(self, model, old_db_table, new_db_table):", "body": "for field in model._meta.local_fields:if not isinstance(field, HStoreField):continuefor key in self._iterate_required_keys(field):self._rename_hstore_required(old_db_table,new_db_table,field,field,key)", "docstring": "Ran when the name of a model is changed.", "id": "f361:c0:m2"} {"signature": "def alter_field(self, model, old_field, new_field, strict=False):", "body": "is_old_field_hstore = 
isinstance(old_field, HStoreField)is_new_field_hstore = isinstance(new_field, HStoreField)if not is_old_field_hstore and not is_new_field_hstore:returnold_uniqueness = getattr(old_field, '', []) or []new_uniqueness = getattr(new_field, '', []) or []if str(old_field.column) != str(new_field.column):for keys in self._iterate_uniqueness_keys(old_field):self._rename_hstore_unique(model._meta.db_table,model._meta.db_table,old_field,new_field,keys)for keys in old_uniqueness:if keys not in new_uniqueness:self._drop_hstore_unique(model,old_field,self._compose_keys(keys))for keys in new_uniqueness:if keys not in old_uniqueness:self._create_hstore_unique(model,new_field,self._compose_keys(keys))", "docstring": "Ran when the configuration on a field changed.", "id": "f362:c0:m5"} {"signature": "def alter_db_table(self, model, old_db_table, new_db_table):", "body": "for field in model._meta.local_fields:if not isinstance(field, HStoreField):continuefor keys in self._iterate_uniqueness_keys(field):self._rename_hstore_unique(old_db_table,new_db_table,field,field,keys)", "docstring": "Ran when the name of a model is changed.", "id": "f362:c0:m2"} {"signature": "def remove_field(self, model, field):", "body": "for keys in self._iterate_uniqueness_keys(field):self._drop_hstore_unique(model,field,keys)", "docstring": "Ran when a field is removed from a model.", "id": "f362:c0:m4"} {"signature": "@staticmethoddef _unique_constraint_name(table: str, field, keys):", "body": "postfix = ''.join(keys)return ''.format(table=table,field=field.column,postfix=postfix)", "docstring": "Gets the name for a UNIQUE INDEX that applies\n to one or more keys in a hstore field.\n\n Arguments:\n table:\n The name of the table the field is\n a part of.\n\n field:\n The hstore field to create a\n UNIQUE INDEX for.\n\n key:\n The name of the hstore key\n to create the name for.\n\n This can also be a tuple\n of multiple names.\n\n Returns:\n The name for the UNIQUE index.", "id": "f362:c0:m9"} {"signature": "def alter_db_table(self, model, old_db_table, new_db_table):", "body": "super(SchemaEditor, self).alter_db_table(model, old_db_table, new_db_table)for mixin in self.post_processing_mixins:mixin.alter_db_table(model,old_db_table,new_db_table)", "docstring": "Ran when the name of a model is changed.", "id": "f363:c0:m3"} {"signature": "def add_field(self, model, field):", "body": "super(SchemaEditor, self).add_field(model, field)for mixin in self.post_processing_mixins:mixin.add_field(model, field)", "docstring": "Ran when a field is added to a model.", "id": "f363:c0:m4"} {"signature": "def create_model(self, model):", "body": "super().create_model(model)for mixin in self.post_processing_mixins:mixin.create_model(model)", "docstring": "Ran when a new model is created.", "id": "f363:c0:m1"} {"signature": "def alter_field(self, model, old_field, new_field, strict=False):", "body": "super(SchemaEditor, self).alter_field(model, old_field, new_field, strict)for mixin in self.post_processing_mixins:mixin.alter_field(model, old_field, new_field, strict)", "docstring": "Ran when the configuration on a field changed.", "id": "f363:c0:m6"} {"signature": "def remove_field(self, model, field):", "body": "for mixin in self.post_processing_mixins:mixin.remove_field(model, field)super(SchemaEditor, self).remove_field(model, field)", "docstring": "Ran when a field is removed from a model.", "id": "f363:c0:m5"} {"signature": "def _get_schema_editor_base():", "body": "return _get_backend_base().SchemaEditorClass", "docstring": "Gets the base 
class for the schema editor.\n\n We have to use the configured base back-end's\n schema editor for this.", "id": "f363:m1"} {"signature": "def unescape(value):", "body": "pattern = ESCAPE_FMT.replace('', '')pattern_bytes = pattern.encode('')re_esc = re.compile(pattern_bytes)return re_esc.sub(_unescape_code, value.encode('')).decode('')", "docstring": "Inverse of escape.", "id": "f383:m3"} {"signature": "def get_password(self, service, username):", "body": "assoc = self._generate_assoc(service, username)service = escape_for_ini(service)username = escape_for_ini(username)config = configparser.RawConfigParser()if os.path.exists(self.file_path):config.read(self.file_path)try:password_base64 = config.get(service, username).encode()password_encrypted = decodebytes(password_base64)try:password = self.decrypt(password_encrypted, assoc).decode('')except ValueError:password = self.decrypt(password_encrypted).decode('')except (configparser.NoOptionError, configparser.NoSectionError):password = Nonereturn password", "docstring": "Read the password from the file.", "id": "f384:c1:m2"} {"signature": "@properties.NonDataPropertydef file_version(self):", "body": "return None", "docstring": "The encryption version used in file to store the passwords.", "id": "f384:c0:m4"} {"signature": "@abc.abstractmethoddef encrypt(self, password, assoc = None):", "body": "", "docstring": "Given a password (byte string) and assoc (byte string, optional),\nreturn an encrypted byte string.\n\nassoc provides associated data (typically: service and username)", "id": "f384:c1:m0"} {"signature": "@abc.abstractpropertydef scheme(self):", "body": "return ''", "docstring": "The encryption scheme used to store the passwords.", "id": "f384:c0:m2"} {"signature": "@abc.abstractpropertydef version(self):", "body": "return None", "docstring": "The encryption version used to store the passwords.", "id": "f384:c0:m3"} {"signature": "def _check_version(self, config):", "body": "try:self.file_version = config.get(escape_for_ini(''),escape_for_ini(''),)except (configparser.NoSectionError, configparser.NoOptionError):return Falsereturn True", "docstring": "check for a valid version\nan existing scheme implies an existing version as well\n\nreturn True, if version is valid, and False otherwise", "id": "f386:c1:m4"} {"signature": "@properties.ClassProperty@classmethoddef priority(self):", "body": "try:__import__('')except ImportError: raise RuntimeError(\"\")try:__import__('')except ImportError: raise RuntimeError(\"\")if not json: raise RuntimeError(\"\"\"\")return ", "docstring": "Applicable for all platforms, where the schemes, that are integrated\nwith your environment, does not fit.", "id": "f386:c1:m0"} {"signature": "def _check_scheme(self, config):", "body": "try:scheme = config.get(escape_for_ini(''),escape_for_ini(''),)except (configparser.NoSectionError, configparser.NoOptionError):raise AttributeError(\"\")if scheme.startswith(''):scheme = scheme[:]if scheme != self.scheme:raise ValueError(\"\"\"\" % (self.scheme, scheme))", "docstring": "check for a valid scheme\n\nraise ValueError otherwise\nraise AttributeError if missing", "id": "f388:c2:m4"} {"signature": "def _unlock(self):", "body": "self.keyring_key = getpass.getpass('')try:ref_pw = self.get_password('', '')assert ref_pw == ''except AssertionError:self._lock()raise ValueError(\"\")", "docstring": "Unlock this keyring by getting the password for the keyring from the\nuser.", "id": "f388:c2:m6"} {"signature": "def _migrate(self, keyring_password=None):", "body": "", 
"docstring": "Convert older keyrings to the current format.", "id": "f388:c2:m10"} {"signature": "@staticmethoddef str_to_num(str_value):", "body": "str_value = str(str_value)try:return int(str_value)except ValueError:return float(str_value)", "docstring": "Convert str_value to an int or a float, depending on the\n numeric value represented by str_value.", "id": "f390:c4:m3"} {"signature": "@staticmethoddef is_convertible(value):", "body": "try:float(value)return Trueexcept ValueError:return Falseexcept TypeError:return False", "docstring": "Return True if value can be converted to a float.", "id": "f390:c4:m2"} {"signature": "def set_assert(self, obj, val, attr=\"\"):", "body": "setattr(obj, attr, val)self.assertEqual(getattr(obj, attr), val)", "docstring": "Assert a valid assignment works.", "id": "f391:c0:m0"} {"signature": "@staticmethoddef exc_thrown_by_descriptor():", "body": "traceback = sys.exc_info()[]tb_locals = traceback.tb_frame.f_localsif \"\" in tb_locals:if not isinstance(tb_locals[\"\"], Descriptor):return Falsereturn Truereturn False", "docstring": "Return True if the last exception was thrown by a\n Descriptor instance.", "id": "f400:c1:m10"} {"signature": "def _infer_length(iterable):", "body": "try:return len(iterable)except (AttributeError, TypeError): try:get_hint = type(iterable).__length_hint__except AttributeError:return Nonetry:hint = get_hint(iterable)except TypeError:return Noneif (hint is NotImplemented ornot isinstance(hint, int) orhint < ):return Nonereturn hint", "docstring": "Try and infer the length using the PEP 424 length hint if available.\n\nadapted from click implementation", "id": "f404:m0"} {"signature": "@classmethoddef set_lock(cls, lock):", "body": "pass", "docstring": "tqdm api compatibility. does nothing", "id": "f404:c0:m10"} {"signature": "def _update_measurements(self):", "body": "self._last_idx = self._now_idxself._last_time = self._now_timeself._now_idx = self._iter_idxself._now_time = default_timer()self._between_time = self._now_time - self._last_timeself._between_count = self._now_idx - self._last_idxself._total_seconds = self._now_time - self._start_time", "docstring": "update current measurements and estimated of time and progress", "id": "f404:c2:m12"} {"signature": "def refresh(self, nolock=False):", "body": "if not self.started:self.begin()self.display_message()", "docstring": "tqdm api compatibility. redisplays message\n(can cause a message to print twice)", "id": "f404:c0:m8"} {"signature": "def set_description(self, desc=None, refresh=True):", "body": "self.desc = descif refresh:self.refresh()", "docstring": "tqdm api compatibility. Changes the description of progress", "id": "f404:c0:m1"} {"signature": "def clear(self, nolock=False):", "body": "pass", "docstring": "tqdm api compatibility. 
does nothing", "id": "f404:c0:m7"} {"signature": "@classmethoddef write(cls, s, file=None, end='', nolock=False):", "body": "fp = file if file is not None else sys.stdoutfp.write(s)fp.write(end)", "docstring": "simply writes to stdout", "id": "f404:c0:m0"} {"signature": "def set_extra(self, extra):", "body": "self.extra = extra", "docstring": "specify a custom info appended to the end of the next message\n\nTODO:\n - [ ] extra is a bad name; come up with something better and rename\n\nExample:\n >>> import progiter\n >>> prog = progiter.ProgIter(range(100, 300, 100), show_times=False, verbose=3)\n >>> for n in prog:\n >>> prog.set_extra('processesing num {}'.format(n))\n 0/2...\n 1/2...processesing num 100\n 2/2...processesing num 200", "id": "f404:c2:m5"} {"signature": "def __init__(self, iterable=None, desc=None, total=None, freq=,initial=, eta_window=, clearline=True, adjust=True,time_thresh=, show_times=True, enabled=True, verbose=None,stream=None, chunksize=None, **kwargs):", "body": "if desc is None:desc = ''if verbose is not None:if verbose <= : enabled = Falseelif verbose == : enabled, clearline, adjust = , , elif verbose == : enabled, clearline, adjust = , , elif verbose >= : enabled, clearline, adjust = , , self._microseconds = kwargs.pop('', False)if kwargs:stream = kwargs.pop('', stream)enabled = not kwargs.pop('', not enabled)if kwargs.get('', None) is not None:adjust = Falsefreq = kwargs.pop('', freq)kwargs.pop('', None) kwargs.pop('', None) kwargs.pop('', True) desc = kwargs.pop('', desc)total = kwargs.pop('', total)enabled = kwargs.pop('', enabled)initial = kwargs.pop('', initial)if kwargs:raise ValueError(''.format(kwargs))if stream is None:stream = sys.stdoutself.stream = streamself.iterable = iterableself.desc = descself.total = totalself.freq = freqself.initial = initialself.enabled = enabledself.adjust = adjustself.show_times = show_timesself.eta_window = eta_windowself.time_thresh = self.clearline = clearlineself.chunksize = chunksizeself.extra = ''self.started = Falseself.finished = Falseself._reset_internals()", "docstring": "Notes:\n See attributes for arg information\n **kwargs accepts most of the tqdm api", "id": "f404:c2:m0"} {"signature": "def close(self):", "body": "self.end()", "docstring": "alias of `end` for tqdm compatibility", "id": "f404:c0:m4"} {"signature": "def __enter__(self):", "body": "self.begin()return self", "docstring": "Example:\n >>> # can be used as a context manager in iter mode\n >>> n = 3\n >>> with ProgIter(desc='manual', total=n, verbose=3) as prog:\n ... 
list(prog(range(n)))", "id": "f404:c2:m2"} {"signature": "def parse_version(package):", "body": "from os.path import dirname, joinimport astinit_fpath = join(dirname(__file__), package, '')with open(init_fpath) as file_:sourcecode = file_.read()pt = ast.parse(sourcecode)class VersionVisitor(ast.NodeVisitor):def visit_Assign(self, node):for target in node.targets:if target.id == '':self.version = node.value.svisitor = VersionVisitor()visitor.visit(pt)return visitor.version", "docstring": "Statically parse the version number from __init__.py\n\nCommandLine:\n python -c \"import setup; print(setup.parse_version('progiter'))\"", "id": "f406:m0"} {"signature": "def _list(api_list_class, arg_namespace, **extra):", "body": "if arg_namespace.starting_point:ordering_field = (arg_namespace.ordering or '').lstrip('')if ordering_field in ('', '', ''):arg_namespace.starting_point = parser.parse(arg_namespace.starting_point)items = api_list_class(starting_point=arg_namespace.starting_point,ordering=arg_namespace.ordering,limit=arg_namespace.limit,request_limit=arg_namespace.request_limit,**extra)items.constructor = lambda x: xtry:pprint(list(items))except ValueError as e:print(e)", "docstring": "A common function for building methods of the \"list showing\".", "id": "f424:m0"} {"signature": "def is_removed(self):", "body": "return self.info().get('') is not None", "docstring": "Returns ``True`` if file is removed.\n\n It might do API request once because it depends on ``info()``.", "id": "f427:c0:m17"} {"signature": "@classmethoddef upload(cls, file_obj, store=None):", "body": "if store is None:store = ''elif store:store = ''else:store = ''data = {'': store,}files = uploading_request('', '', data=data,files={'': file_obj})file_ = cls(files[''])return file_", "docstring": "Uploads a file and returns ``File`` instance.\n\n Args:\n - file_obj: file object to upload to\n - store (Optional[bool]): Should the file be automatically stored\n upon upload. 
Defaults to None.\n - False - do not store file\n - True - store file (can result in error if autostore\n is disabled for project)\n - None - use project settings\n\n Returns:\n ``File`` instance", "id": "f427:c0:m28"} {"signature": "@classmethoddef create(cls, files):", "body": "data = {}for index, file_ in enumerate(files):if isinstance(file_, File):file_index = ''.format(index=index)data[file_index] = six.text_type(file_)else:raise InvalidParamError('')if not data:raise InvalidParamError('')group_info = uploading_request('', '', data=data)group = cls.construct_from(group_info)return group", "docstring": "Creates file group and returns ``FileGroup`` instance.\n\n It expects iterable object that contains ``File`` instances, e.g.::\n\n >>> file_1 = File('6c5e9526-b0fe-4739-8975-72e8d5ee6342')\n >>> file_2 = File('a771f854-c2cb-408a-8c36-71af77811f3b')\n >>> FileGroup.create((file_1, file_2))\n ", "id": "f427:c1:m16"} {"signature": "def is_ready(self):", "body": "return self.info().get('')", "docstring": "Returns ``True`` if the file is fully uploaded on S3.\n\n It might do API request once because it depends on ``info()``.", "id": "f427:c0:m19"} {"signature": "def __init__(self, seq):", "body": "if not isinstance(seq, Iterable):raise TypeError('')self._seq = seq", "docstring": "Seq can be:\n * list of UUIDs\n * list of File's instances\n * instance of FileList", "id": "f427:c4:m0"} {"signature": "def uuids(self):", "body": "for f in self._seq:if isinstance(f, File):yield f.uuidelif isinstance(f, six.string_types):yield felse:raise ValueError(''.format(type(f)))", "docstring": "Extract uuid from each item of specified ``seq``.", "id": "f427:c4:m4"} {"signature": "@propertydef cdn_url(self):", "body": "return ''.format(cdn_base=conf.cdn_base,group_id=self.id)", "docstring": "Returns group's CDN url.\n\n Usage example::\n\n >>> file_group = FileGroup('0513dda0-582f-447d-846f-096e5df9e2bb~2')\n >>> file_group.cdn_url\n https://ucarecdn.com/0513dda0-582f-447d-846f-096e5df9e2bb~2/", "id": "f427:c1:m7"} {"signature": "@classmethoddef construct_from(cls, file_info):", "body": "file_ = cls(file_info[''])file_.default_effects = file_info.get('')file_._info_cache = file_inforeturn file_", "docstring": "Constructs ``File`` instance from file information.\n\n For example you have result of\n ``/files/1921953c-5d94-4e47-ba36-c2e1dd165e1a/`` API request::\n\n >>> file_info = {\n # ...\n 'uuid': '1921953c-5d94-4e47-ba36-c2e1dd165e1a',\n # ...\n }\n >>> File.construct_from(file_info)\n ", "id": "f427:c0:m27"} {"signature": "def is_stored(self):", "body": "return self.info().get('') is not None", "docstring": "Returns ``True`` if file is stored.\n\n It might do API request once because it depends on ``info()``.", "id": "f427:c0:m16"} {"signature": "def store(self):", "body": "return self._base_opration('')", "docstring": "Store all specified files.", "id": "f427:c4:m1"} {"signature": "def delete(self):", "body": "self._info_cache = rest_request('', self._api_uri)", "docstring": "Deletes file by requesting Uploadcare API.", "id": "f427:c0:m26"} {"signature": "def size(self):", "body": "return self.info().get('')", "docstring": "Returns the file size in bytes.\n\n It might do API request once because it depends on ``info()``.", "id": "f427:c0:m20"} {"signature": "def nova_process(body, message):", "body": "event_type = body['']process = nova_customer_process.get(event_type)if process is not None:process(body, message)else:matched = Falseprocess_wildcard = Nonefor pattern in 
nova_customer_process_wildcard.keys():if pattern.match(event_type):process_wildcard = nova_customer_process_wildcard.get(pattern)matched = Truebreakif matched:process_wildcard(body, message)else:default_process(body, message)message.ack()", "docstring": "This function deals with nova notifications.\n\nFirst, look for a process in customer_process (entries without wildcards).\nIf not found in customer_process, look for a process in customer_process_wildcard.\nIf not found in customer_process_wildcard, use the ternya default process.\n:param body: dict of openstack notification.\n:param message: kombu Message class\n:return:", "id": "f442:m0"} {"signature": "def glance(*arg):", "body": "check_event_type(Openstack.Glance, *arg)event_type = arg[]def decorator(func):if event_type.find(\"\") != -:event_type_pattern = pre_compile(event_type)glance_customer_process_wildcard[event_type_pattern] = funcelse:glance_customer_process[event_type] = funclog.info(\"\".format(func.__name__, event_type))@functools.wraps(func)def wrapper(*args, **kwargs):func(*args, **kwargs)return wrapperreturn decorator", "docstring": "Glance annotation for adding a function to process glance notifications.\n\nIf event_type includes a wildcard, {pattern: function} is put into the process_wildcard dict;\notherwise {event_type: function} is put into the process dict.\n\n:param arg: event_type of notification", "id": "f444:m3"} {"signature": "def neutron(*arg):", "body": "check_event_type(Openstack.Neutron, *arg)event_type = arg[]def decorator(func):if event_type.find(\"\") != -:event_type_pattern = pre_compile(event_type)neutron_customer_process_wildcard[event_type_pattern] = funcelse:neutron_customer_process[event_type] = funclog.info(\"\".format(func.__name__, event_type))@functools.wraps(func)def wrapper(*args, **kwargs):func(*args, **kwargs)return wrapperreturn decorator", "docstring": "Neutron annotation for adding a function to process neutron notifications.\n\nIf event_type includes a wildcard, {pattern: function} is put into the process_wildcard dict;\notherwise {event_type: function} is put into the process dict.\n\n:param arg: event_type of notification", "id": "f444:m2"} {"signature": "def init_glance_consumer(self, mq):", "body": "if not self.enable_component_notification(Openstack.Glance):log.debug(\"\")returnfor i in range(self.config.glance_mq_consumer_count):mq.create_consumer(self.config.glance_mq_exchange,self.config.glance_mq_queue,ProcessFactory.process(Openstack.Glance))log.debug(\"\")", "docstring": "Init openstack glance mq\n\n1. Check whether listening for glance notifications is enabled\n2. Create consumer\n\n:param mq: class ternya.mq.MQ", "id": "f446:c0:m10"} {"signature": "def init_nova_consumer(self, mq):", "body": "if not self.enable_component_notification(Openstack.Nova):log.debug(\"\")returnfor i in range(self.config.nova_mq_consumer_count):mq.create_consumer(self.config.nova_mq_exchange,self.config.nova_mq_queue,ProcessFactory.process(Openstack.Nova))log.debug(\"\")", "docstring": "Init openstack nova mq\n\n1. Check whether listening for nova notifications is enabled\n2. 
Create consumer\n\n:param mq: class ternya.mq.MQ", "id": "f446:c0:m7"} {"signature": "def find_file_match(folder_path, regex=''):", "body": "outlist = []for root, dirs, files in os.walk(folder_path):outlist.extend([os.path.join(root, f) for f in filesif re.match(regex, f)])return outlist", "docstring": "Returns absolute paths of files that match the regex within folder_path and\nall its children folders.\n\nNote: The regex matching is done using the match function\nof the re module.\n\nParameters\n----------\nfolder_path: string\n\nregex: string\n\nReturns\n-------\nA list of strings.", "id": "f456:m0"} {"signature": "@classmethoddef from_json_str(cls, json_str):", "body": "dct = json_to_dict(json_str)return cls(**dct)", "docstring": "Convert json string representation into class instance.\n\n Args:\n json_str: json representation as string.\n\n Returns:\n New instance of the class with data loaded from json string.", "id": "f458:c0:m0"} {"signature": "def change_xml_encoding(filepath, src_enc, dst_enc=''):", "body": "enc_attr = \"\"replace_file_content(filepath, enc_attr.format(src_enc), enc_attr.format(dst_enc), )", "docstring": "Modify the encoding entry in the XML file.\n\n Parameters\n ----------\n filepath: str\n Path to the file to be modified.\n\n src_enc: str\n Encoding that is written in the file\n\n dst_enc: str\n Encoding to be set in the file.", "id": "f461:m2"} {"signature": "def write_to_file(file_path, content, encoding=None):", "body": "try:with open(file_path, \"\") as f:f.write(content.encode(encoding))except:log.exception(''.format(file_path))raise", "docstring": "Write `content` inside the file in `file_path` with the given encoding.\n Parameters\n ----------\n file_path: str\n Path to the output file. Will be overwritten if exists.\n\n content: str\n The content you want in the file.\n\n encoding: str\n The name of the encoding.", "id": "f464:m7"} {"signature": "def get_extension(filepath, check_if_exists=False):", "body": "if check_if_exists:if not os.path.exists(filepath):err = '' + filepathlog.error(err)raise IOError(err)try:rest, ext = os.path.splitext(filepath)except:raiseelse:return ext", "docstring": "Return the extension of fpath.\n\n Parameters\n ----------\n fpath: string\n File name or path\n\n check_if_exists: bool\n\n Returns\n -------\n str\n The extension of the file name or path", "id": "f464:m0"} {"signature": "def get_tempfile(suffix='', dirpath=None):", "body": "if dirpath is None:dirpath = get_temp_dir()return tempfile.NamedTemporaryFile(suffix=suffix, dir=dirpath)", "docstring": "Return a temporary file with the given suffix within dirpath.\n If dirpath is None, will look for a temporary folder in your system.\n\n Parameters\n ----------\n suffix: str\n Temporary file name suffix\n\n dirpath: str\n Folder path where the temporary file will be created\n\n Returns\n -------\n temp_filepath: str\n The path to the temporary file", "id": "f464:m3"} {"signature": "def fill(self, doc_contents):", "body": "for key, content in doc_contents.items():doc_contents[key] = replace_chars_for_svg_code(content)return super(SVGDocument, self).fill(doc_contents=doc_contents)", "docstring": "Fill the content of the document with the information in doc_contents.\n This is different from the TextDocument fill function, because this will\n check for symbols in the values of `doc_content` and replace them\n with proper XML codes before filling the template.\n\n Parameters\n ----------\n doc_contents: dict\n Set of values to set the template document.\n\n Returns\n -------\n 
filled_doc: str\n The content of the document with the template information filled.", "id": "f470:c1:m0"} {"signature": "def _setup_template_file(self, template_file_path):", "body": "try:template_file = template_file_pathtemplate_env = get_environment_for(template_file_path)template = template_env.get_template(os.path.basename(template_file))except:raiseelse:self._template_file = template_fileself._template_env = template_envself.template = template", "docstring": "Setup self.template\n\n Parameters\n ----------\n template_file_path: str\n Document template file path.", "id": "f470:c0:m1"} {"signature": "def render(self, file_path, **kwargs):", "body": "temp = get_tempfile(suffix='')self.save_content(temp.name)file_type = kwargs.get('', '')dpi = kwargs.get('', )support_unicode = kwargs.get('', False)try:if file_type == '':shutil.copyfile(temp.name, file_path)elif file_type == '':svg2png(temp.name, file_path, dpi=dpi)elif file_type == '':svg2pdf(temp.name, file_path, dpi=dpi, support_unicode=support_unicode)except:log.exception(''.format(file_path, file_type))raise", "docstring": "Save the content of the .svg file in the chosen rendered format.\n\n Parameters\n ----------\n file_path: str\n Path to the output file.\n\n Kwargs\n ------\n file_type: str\n Choices: 'png', 'pdf', 'svg'\n Default: 'pdf'\n\n dpi: int\n Dots-per-inch for the png and pdf.\n Default: 150\n\n support_unicode: bool\n Whether to allow unicode to be encoded in the PDF.\n Default: False", "id": "f470:c1:m1"} {"signature": "def tex2pdf(tex_file, output_file=None, output_format=''):", "body": "if not os.path.exists(tex_file):raise IOError(''.format(tex_file))if output_format != '' and output_format != '':raise ValueError(\"\".format(output_format))cmd_name = ''check_command(cmd_name)args_strings = [cmd_name]if output_file is not None:args_strings += [''.format(os.path.abspath(os.path.dirname(output_file)))]result_dir = os.path.dirname(output_file) if output_file else os.path.dirname(tex_file)args_strings += [''.format(output_format)]args_strings += ['' + tex_file + '']log.debug(''.format(cmd_name, args_strings))ret = simple_call(args_strings)result_file = os.path.join(result_dir, remove_ext(os.path.basename(tex_file)) + '' + output_format)if os.path.exists(result_file):shutil.move(result_file, output_file)else:raise IOError('')log.debug(''.format(result_dir))cleanup(result_dir, '')cleanup(result_dir, '')return ret", "docstring": "Call PDFLatex to convert TeX files to PDF.\n\n Parameters\n ----------\n tex_file: str\n Path to the input LateX file.\n\n output_file: str\n Path to the output PDF file.\n If None, will use the same output directory as the tex_file.\n\n output_format: str\n Output file format. Choices: 'pdf' or 'dvi'. 
Default: 'pdf'\n\n Returns\n -------\n return_value\n PDFLatex command call return value.", "id": "f471:m0"} {"signature": "def __init__(self, thing):", "body": "self.thing = thing", "docstring": "Initialize the container.\n\nthing -- the thing to store", "id": "f478:c0:m0"} {"signature": "def set_default_headers(self, *args, **kwargs):", "body": "self.set_header('', '')self.set_header('','')self.set_header('','')", "docstring": "Set the default headers for all requests.", "id": "f478:c4:m2"} {"signature": "def get_thing(self, idx):", "body": "try:idx = int(idx)except ValueError:return Noneif idx < or idx >= len(self.things):return Nonereturn self.things[idx]", "docstring": "Get the thing at the given index.\n\nidx -- the index", "id": "f478:c1:m1"} {"signature": "def get(self):", "body": "self.set_header('', '')ws_href = ''.format('' if self.request.protocol == '' else '',self.request.headers.get('', ''))descriptions = []for thing in self.things.get_things():description = thing.as_thing_description()description[''].append({'': '','': ''.format(ws_href, thing.get_href()),})descriptions.append(description)self.write(json.dumps(descriptions))", "docstring": "Handle a GET request.\n\nproperty_name -- the name of the property from the URL path", "id": "f478:c3:m0"} {"signature": "def get(self, thing_id='', property_name=None):", "body": "thing = self.get_thing(thing_id)if thing is None:self.set_status()returnif thing.has_property(property_name):self.set_header('', '')self.write(json.dumps({property_name: thing.get_property(property_name),}))else:self.set_status()", "docstring": "Handle a GET request.\n\nthing_id -- ID of the thing this request is for\nproperty_name -- the name of the property from the URL path", "id": "f478:c6:m0"} {"signature": "def delete(self, thing_id='', action_name=None, action_id=None):", "body": "thing = self.get_thing(thing_id)if thing is None:self.set_status()returnif thing.remove_action(action_name, action_id):self.set_status()else:self.set_status()", "docstring": "Handle a DELETE request.\n\nthing_id -- ID of the thing this request is for\naction_name -- name of the action from the URL path\naction_id -- the action ID from the URL path", "id": "f478:c9:m2"} {"signature": "def open(self):", "body": "self.thing.add_subscriber(self)", "docstring": "Handle a new connection.", "id": "f478:c4:m5"} {"signature": "def get(self, thing_id=''):", "body": "thing = self.get_thing(thing_id)if thing is None:self.set_status()returnself.set_header('', '')self.write(json.dumps(thing.get_properties()))", "docstring": "Handle a GET request.\n\nthing_id -- ID of the thing this request is for", "id": "f478:c5:m0"} {"signature": "def get(self, thing_id=''):", "body": "thing = self.get_thing(thing_id)if thing is None:self.set_status()returnself.set_header('', '')self.write(json.dumps(thing.get_event_descriptions()))", "docstring": "Handle a GET request.\n\nthing_id -- ID of the thing this request is for", "id": "f478:c10:m0"} {"signature": "def on_close(self):", "body": "self.thing.remove_subscriber(self)", "docstring": "Handle a close event on the socket.", "id": "f478:c4:m7"} {"signature": "def get(self, thing_id='', event_name=None):", "body": "thing = self.get_thing(thing_id)if thing is None:self.set_status()returnself.set_header('', '')self.write(json.dumps(thing.get_event_descriptions(event_name=event_name)))", "docstring": "Handle a GET request.\n\nthing_id -- ID of the thing this request is for\nevent_name -- name of the event from the URL path", "id": "f478:c11:m0"} {"signature": 
"def initialize(self, things, hosts):", "body": "self.things = thingsself.hosts = hosts", "docstring": "Initialize the handler.\n\nthings -- list of Things managed by this server\nhosts -- list of allowed hostnames", "id": "f478:c4:m0"} {"signature": "def __init__(self, things, name):", "body": "self.things = thingsself.name = name", "docstring": "Initialize the container.\n\nthings -- the things to store\nname -- the mDNS server name", "id": "f478:c1:m0"} {"signature": "def get_things(self):", "body": "return self.things", "docstring": "Get the list of things.", "id": "f478:c1:m2"} {"signature": "def on_message(self, message):", "body": "try:message = json.loads(message)except ValueError:try:self.write_message(json.dumps({'': '','': {'': '','': '',},}))except tornado.websocket.WebSocketClosedError:passreturnif '' not in message or '' not in message:try:self.write_message(json.dumps({'': '','': {'': '','': '',},}))except tornado.websocket.WebSocketClosedError:passreturnmsg_type = message['']if msg_type == '':for property_name, property_value in message[''].items():try:self.thing.set_property(property_name, property_value)except PropertyError as e:self.write_message(json.dumps({'': '','': {'': '','': str(e),},}))elif msg_type == '':for action_name, action_params in message[''].items():input_ = Noneif '' in action_params:input_ = action_params['']action = self.thing.perform_action(action_name, input_)if action:tornado.ioloop.IOLoop.current().spawn_callback(perform_action,action,)else:self.write_message(json.dumps({'': '','': {'': '','': '','': message,},}))elif msg_type == '':for event_name in message[''].keys():self.thing.add_event_subscriber(event_name, self)else:try:self.write_message(json.dumps({'': '','': {'': '','': '' + msg_type,'': message,},}))except tornado.websocket.WebSocketClosedError:pass", "docstring": "Handle an incoming message.\n\nmessage -- message to handle", "id": "f478:c4:m6"} {"signature": "def validate_value(self, value):", "body": "if '' in self.metadata and self.metadata['']:raise PropertyError('')try:validate(value, self.metadata)except ValidationError:raise PropertyError('')", "docstring": "Validate new property value before setting it.\n\nvalue -- New value", "id": "f479:c0:m1"} {"signature": "def get_name(self):", "body": "return self.name", "docstring": "Get the name of this property.\n\nReturns the name.", "id": "f479:c0:m7"} {"signature": "def __init__(self, thing, name, value, metadata=None):", "body": "self.thing = thingself.name = nameself.value = valueself.href_prefix = ''self.href = ''.format(self.name)self.metadata = metadata if metadata is not None else {}self.value.on('', lambda _: self.thing.property_notify(self))", "docstring": "Initialize the object.\n\nthing -- the Thing this property belongs to\nname -- name of the property\nvalue -- Value object to hold the property value\nmetadata -- property metadata, i.e. type, description, unit, etc.,\n as a dict", "id": "f479:c0:m0"} {"signature": "def get_href(self):", "body": "return self.href_prefix + self.href", "docstring": "Get the href of this property.\n\nReturns the href.", "id": "f479:c0:m4"} {"signature": "def add_available_action(self, name, metadata, cls):", "body": "if metadata is None:metadata = {}self.available_actions[name] = {'': metadata,'': cls,}self.actions[name] = []", "docstring": "Add an available action.\n\nname -- name of the action\nmetadata -- action metadata, i.e. 
type, description, etc., as a dict\ncls -- class to instantiate for this action", "id": "f481:c0:m25"} {"signature": "def remove_property(self, property_):", "body": "if property_.name in self.properties:del self.properties[property_.name]", "docstring": "Remove a property from this thing.\n\nproperty_ -- property to remove", "id": "f481:c0:m14"} {"signature": "def add_event(self, event):", "body": "self.events.append(event)self.event_notify(event)", "docstring": "Add a new event and notify subscribers.\n\nevent -- the event that occurred", "id": "f481:c0:m21"} {"signature": "def as_thing_description(self):", "body": "thing = {'': self.name,'': self.href_prefix if self.href_prefix else '','': self.context,'': self.type,'': self.get_property_descriptions(),'': {},'': {},'': [{'': '','': ''.format(self.href_prefix),},{'': '','': ''.format(self.href_prefix),},{'': '','': ''.format(self.href_prefix),},],}for name, action in self.available_actions.items():thing[''][name] = action['']thing[''][name][''] = [{'': '','': ''.format(self.href_prefix, name),},]for name, event in self.available_events.items():thing[''][name] = event['']thing[''][name][''] = [{'': '','': ''.format(self.href_prefix, name),},]if self.ui_href is not None:thing[''].append({'': '','': '','': self.ui_href,})if self.description:thing[''] = self.descriptionreturn thing", "docstring": "Return the thing state as a Thing Description.\n\nReturns the state as a dictionary.", "id": "f481:c0:m1"} {"signature": "def get_action_descriptions(self, action_name=None):", "body": "descriptions = []if action_name is None:for name in self.actions:for action in self.actions[name]:descriptions.append(action.as_action_description())elif action_name in self.actions:for action in self.actions[action_name]:descriptions.append(action.as_action_description())return descriptions", "docstring": "Get the thing's actions as an array.\n\naction_name -- Optional action name to get descriptions for\n\nReturns the action descriptions.", "id": "f481:c0:m11"} {"signature": "def get_event_descriptions(self, event_name=None):", "body": "if event_name is None:return [e.as_event_description() for e in self.events]else:return [e.as_event_description()for e in self.events if e.get_name() == event_name]", "docstring": "Get the thing's events as an array.\n\nevent_name -- Optional event name to get descriptions for\n\nReturns the event descriptions.", "id": "f481:c0:m12"} {"signature": "def get_ui_href(self):", "body": "return self.ui_href", "docstring": "Get the UI href.", "id": "f481:c0:m3"} {"signature": "def event_notify(self, event):", "body": "if event.name not in self.available_events:returnmessage = json.dumps({'': '','': event.as_event_description(),})for subscriber in self.available_events[event.name]['']:try:subscriber.write_message(message)except tornado.websocket.WebSocketClosedError:pass", "docstring": "Notify all subscribers of an event.\n\nevent -- the event that occurred", "id": "f481:c0:m32"} {"signature": "def perform_action(self, action_name, input_=None):", "body": "if action_name not in self.available_actions:return Noneaction_type = self.available_actions[action_name]if '' in action_type['']:try:validate(input_, action_type[''][''])except ValidationError:return Noneaction = action_type[''](self, input_=input_)action.set_href_prefix(self.href_prefix)self.action_notify(action)self.actions[action_name].append(action)return action", "docstring": "Perform an action on the thing.\n\naction_name -- name of the action\ninput_ -- any action 
inputs\n\nReturns the action that was created.", "id": "f481:c0:m23"} {"signature": "def add_property(self, property_):", "body": "property_.set_href_prefix(self.href_prefix)self.properties[property_.name] = property_", "docstring": "Add a property to this thing.\n\nproperty_ -- property to add", "id": "f481:c0:m13"} {"signature": "def get_action(self, action_name, action_id):", "body": "if action_name not in self.actions:return Nonefor action in self.actions[action_name]:if action.id == action_id:return actionreturn None", "docstring": "Get an action.\n\naction_name -- name of the action\naction_id -- ID of the action\n\nReturns the requested action if found, else None.", "id": "f481:c0:m20"} {"signature": "def start(self):", "body": "self.status = ''self.thing.action_notify(self)self.perform_action()self.finish()", "docstring": "Start performing the action.", "id": "f482:c0:m11"} {"signature": "def get_time_completed(self):", "body": "return self.time_completed", "docstring": "Get the time the action was completed.", "id": "f482:c0:m9"} {"signature": "def finish(self):", "body": "self.status = ''self.time_completed = timestamp()self.thing.action_notify(self)", "docstring": "Finish performing the action.", "id": "f482:c0:m14"} {"signature": "def as_event_description(self):", "body": "description = {self.name: {'': self.time,},}if self.data is not None:description[self.name][''] = self.datareturn description", "docstring": "Get the event description.\n\nReturns a dictionary describing the event.", "id": "f483:c0:m1"} {"signature": "def __init__(self, thing, name, data=None):", "body": "self.thing = thingself.name = nameself.data = dataself.time = timestamp()", "docstring": "Initialize the object.\n\nthing -- Thing this event belongs to\nname -- name of the event\ndata -- data associated with the event", "id": "f483:c0:m0"} {"signature": "def get_total_vat(self):", "body": "raise NotImplementedError()", "docstring": "Return the total VAT amount.", "id": "f494:c1:m8"} {"signature": "def get_ledger_code_to_guid_map(self, codes):", "body": "if codes:codes = set(str(i) for i in codes)ledger_ids = self._api.ledgeraccounts.filter(code__in=codes)ret = dict((str(i['']), i['']) for i in ledger_ids)found = set(ret.keys())missing = (codes - found)if missing:raise UnknownLedgerCodes(missing)return retreturn {}", "docstring": "Convert set of human codes and to a dict of code to exactonline\nguid mappings.\n\nExample::\n\n ret = inv.get_ledger_code_to_guid_map(['1234', '5555'])\n ret == {'1234': '',\n '5555': ''}", "id": "f494:c1:m4"} {"signature": "def set_division(self, division):", "body": "try:division = int(division)except (TypeError, ValueError):raise V1DivisionError('' %(division,))urlbase = '' % (division,)resource = urljoin(urlbase,\"\")try:self.rest(GET(resource))except AssertionError:raise V1DivisionError('' %(division,))self.storage.set_division(division)", "docstring": "Select the \"current\" division that we'll be working on/with.", "id": "f500:c1:m2"} {"signature": "def map_exact2foreign_invoice_numbers(self, exact_invoice_numbers=None):", "body": "if exact_invoice_numbers is None:ret = self.filter(select='')return dict((i[''], i['']) for i in ret)exact_to_foreign_map = {}exact_invoice_numbers = list(set(exact_invoice_numbers)) for offset in range(, len(exact_invoice_numbers), ):batch = exact_invoice_numbers[offset:(offset + )]filter_ = ''.join('' % (i,) for i in batch)assert filter_ ret = self.filter(filter=filter_, select='')exact_to_foreign_map.update(dict((i[''], i['']) for i in 
ret))for exact_invoice_number in exact_invoice_numbers:if exact_invoice_number not in exact_to_foreign_map:exact_to_foreign_map[exact_invoice_number] = Nonereturn exact_to_foreign_map", "docstring": "Optionally supply a list of ExactOnline invoice numbers.\n\nReturns a dictionary of ExactOnline invoice numbers to foreign\n(YourRef) invoice numbers.", "id": "f505:c0:m2"} {"signature": "def http_delete(url, opt=opt_default):", "body": "return _http_request(url, method='', opt=opt)", "docstring": "Shortcut for urlopen (DELETE) + read. We'll probably want to add a\nnice timeout here later too.", "id": "f508:m1"} {"signature": "def get(self, section, option, **kwargs):", "body": "try:ret = super(ExactOnlineConfig, self).get(section, option, **kwargs)except (NoOptionError, NoSectionError):raise MissingSetting(option, section)return ret", "docstring": "Get method that raises MissingSetting if the value was unset.\n\nThis differs from the SafeConfigParser which may raise either a\nNoOptionError or a NoSectionError.\n\nWe take extra **kwargs because the Python 3.5 configparser extends the\nget method signature and it calls self with those parameters.\n\n def get(self, section, option, *, raw=False, vars=None,\n fallback=_UNSET):", "id": "f509:c0:m1"} {"signature": "def get_or_set_default(self, section, option, value):", "body": "try:ret = self.get(section, option)except MissingSetting:self.set(section, option, value)ret = valuereturn ret", "docstring": "Base method to fetch values and to set defaults in case they\ndon't exist.", "id": "f510:c1:m0"} {"signature": "def _get_result():", "body": "result = ivoire.current_resultif result is None:raise ValueError(\"\"\"\")return result", "docstring": "Find the global result object.", "id": "f518:m0"} {"signature": "def __call__(self, name):", "body": "example = self.Example(name=name, group=self, before=self._before, after=self._after,)if self.failureException is not None:example.failureException = self.failureExceptionself.add_example(example)return example", "docstring": "Construct and return a new ``Example``.", "id": "f518:c2:m6"} {"signature": "def skip_if(self, condition, reason):", "body": "if condition:raise SkipTest(reason)", "docstring": "Skip the example if the condition is set, with the provided reason.", "id": "f518:c1:m7"} {"signature": "def transform_describe(self, node, describes, context_variable):", "body": "body = self.transform_describe_body(node.body, context_variable)return ast.ClassDef(name=\"\" + describes.title(),bases=[ast.Name(id=\"\", ctx=ast.Load())],keywords=[],starargs=None,kwargs=None,body=list(body),decorator_list=[],)", "docstring": "Transform a describe node into a ``TestCase``.\n\n``node`` is the node object.\n``describes`` is the name of the object being described.\n``context_variable`` is the name bound in the context manager (usually\n\"it\").", "id": "f523:c0:m3"} {"signature": "def parse(argv=None):", "body": "if argv is None:argv = sys.argv[:]if not argv or argv[] not in {\"\", \"\"}:argv = [\"\"] + argvarguments = _clean(_parser.parse_args(argv))return arguments", "docstring": "Parse some arguments using the parser.", "id": "f526:m1"} {"signature": "def should_color(when):", "body": "if when == \"\":return sys.stderr.isatty()return when == \"\"", "docstring": "Decide whether to color output.", "id": "f526:m0"} {"signature": "def skip(self, example, reason):", "body": "return \"\"", "docstring": "A skip was encountered.", "id": "f536:c3:m10"} {"signature": "def exit_context(self, depth):", "body": "return \"\"", 
"docstring": "A context was exited.", "id": "f536:c3:m3"} {"signature": "def failure(self, example, exc_info):", "body": "return \"\"", "docstring": "A failure was encountered.", "id": "f536:c3:m9"} {"signature": "def enter_group(self, group):", "body": "return \"\"", "docstring": "A new example group was entered.", "id": "f536:c3:m4"} {"signature": "def __getattr__(self, attr):", "body": "return getattr(self._formatter, attr)", "docstring": "Delegate to the wrapped formatter.", "id": "f536:c2:m1"} {"signature": "def success(self, example):", "body": "return \"\"", "docstring": "A success was encountered.", "id": "f536:c3:m11"} {"signature": "def enter_context(self, context, depth):", "body": "return \"\"", "docstring": "A new context was entered.", "id": "f536:c3:m2"} {"signature": "def load_from_path(path):", "body": "if os.path.isdir(path):paths = discover(path)else:paths = [path]for path in paths:name = os.path.basename(os.path.splitext(path)[])imp.load_source(name, path)", "docstring": "Load a spec from a given path, discovering specs if a directory is given.", "id": "f537:m1"} {"signature": "def load_by_name(name):", "body": "if os.path.exists(name):load_from_path(name)else:__import__(name)", "docstring": "Load a spec from either a file path or a fully qualified name.", "id": "f537:m0"} {"signature": "def get_email_options(self):", "body": "return {}", "docstring": "Override this method to change default e-mail options", "id": "f548:c4:m0"} {"signature": "def dispose(json_str):", "body": "result_str = list(json_str)escaped = Falsenormal = Truesl_comment = Falseml_comment = Falsequoted = Falsea_step_from_comment = Falsea_step_from_comment_away = Falseformer_index = Nonefor index, char in enumerate(json_str):if escaped: escaped = Falsecontinueif a_step_from_comment: if char != '' and char != '':a_step_from_comment = Falsenormal = Truecontinueif a_step_from_comment_away: if char != '':a_step_from_comment_away = Falseif char == '':if normal and not escaped:quoted = Truenormal = Falseelif quoted and not escaped:quoted = Falsenormal = Trueelif char == '':if normal or quoted:escaped = Trueelif char == '':if a_step_from_comment:a_step_from_comment = Falsesl_comment = Truenormal = Falseformer_index = index - elif a_step_from_comment_away:a_step_from_comment_away = Falsenormal = Trueml_comment = Falsefor i in range(former_index, index + ):result_str[i] = \"\"elif normal:a_step_from_comment = Truenormal = Falseelif char == '':if a_step_from_comment:a_step_from_comment = Falseml_comment = Truenormal = Falseformer_index = index - elif ml_comment:a_step_from_comment_away = Trueelif char == '':if sl_comment:sl_comment = Falsenormal = Truefor i in range(former_index, index + ):result_str[i] = \"\"elif char == '' or char == '':if normal:_remove_last_comma(result_str, index)return (\"\" if isinstance(json_str, str) else u\"\").join(result_str)", "docstring": "Clear all comments in json_str.\n\n Clear JS-style comments like // and /**/ in json_str.\n Accept a str or unicode as input.\n\n Args:\n json_str: A json string of str or unicode to clean up comment\n\n Returns:\n str: The str without comments (or unicode if you pass in unicode)", "id": "f573:m0"} {"signature": "def job_set_done(self, js):", "body": "if self._closed:returnif self._active_js != js:returntry:while self._active_js.is_done():logger.debug(\"\")self._active_js = self._js_queue.popleft()logger.debug(\"\")except IndexError:self._active_js = Noneelse:self._distribute_jobs()", "docstring": "Called when a job set has been completed or 
cancelled. If the job set\nwas active, the next incomplete job set is loaded from the job set\nqueue and is activated.", "id": "f576:c6:m6"} {"signature": "async def wait_changed(self):", "body": "if not self.is_complete():waiter = self._loop.create_future()self._waiters.append(waiter)await waiter", "docstring": "Waits until the result set changes. Possible changes can be a result\nbeing added or the result set becoming complete. If the result set is\nalready completed, this method returns immediately.", "id": "f576:c2:m8"} {"signature": "def add_result(self, result):", "body": "if self._active_jobs == :returnself._results.add(result)self._active_jobs -= if self._active_jobs == :self._done()", "docstring": "Adds the result of a completed job to the result list, then decrements\nthe active job count. If the job set is already complete, the result is\nsimply discarded instead.", "id": "f576:c5:m7"} {"signature": "def close(self):", "body": "if self._closed:returnself._closed = Trueif self._active_js is not None:self._active_js.cancel()for js in self._js_queue:js.cancel()", "docstring": "Closes the job manager. No more jobs will be assigned, no more job sets\nwill be added, and any queued or active job sets will be cancelled.", "id": "f576:c6:m8"} {"signature": "def aiter(self): ", "body": "return ResultsIterator(self)", "docstring": "Returns an async iterator over the results.", "id": "f576:c2:m4"} {"signature": "def return_job(self, job):", "body": "if self._closed:returnjs = self._job_sources[job]if len(self._ready_callbacks) > :callback = self._ready_callbacks.popleft()callback(job)else:del self._job_sources[job]js.return_job(job)", "docstring": "Returns a job to its source job set to be run again later.", "id": "f576:c6:m4"} {"signature": "def get_job(self, callback):", "body": "assert not self._closedif self._active_js is None or not self._active_js.job_available():self._ready_callbacks.append(callback)else:job = self._active_js.get_job()self._job_sources[job] = self._active_jscallback(job)", "docstring": "Calls the given callback function when a job becomes available.", "id": "f576:c6:m3"} {"signature": "def add(self, result):", "body": "assert not self._completeself._results.append(result)self._change()", "docstring": "Adds a new result.", "id": "f576:c2:m5"} {"signature": "def add_result(self, job, result):", "body": "if self._closed:returnjs = self._job_sources[job]del self._job_sources[job]js.add_result(result)", "docstring": "Adds the result of a job to the results list of the job's source job\nset.", "id": "f576:c6:m5"} {"signature": "def cancel(self):", "body": "self._js.cancel()", "docstring": "Cancels the job set.", "id": "f576:c4:m3"} {"signature": "async def start_master(host=\"\", port=, *, loop=None):", "body": "loop = loop if loop is not None else asyncio.get_event_loop()manager = jobs.JobManager(loop=loop)workers = set()server = await loop.create_server(lambda: WorkerProtocol(manager, workers), host, port)return Master(server, manager, workers, loop=loop)", "docstring": "Starts a new HighFive master at the given host and port, and returns it.", "id": "f578:m0"} {"signature": "def line_received(self, line):", "body": "response = json.loads(line.decode(\"\"))self._worker.response_received(response)", "docstring": "Called when a complete line is found from the remote worker. 
Decodes\na response object from the line, then passes it to the worker object.", "id": "f578:c0:m3"} {"signature": "def response_received(self, response):", "body": "if self._closed:returnassert self._job is not Nonelogger.debug(\"\".format(id(self)))result = self._job.get_result(response)self._manager.add_result(self._job, result)self._load_job()", "docstring": "Called when a response to a job RPC has been received. Decodes the\nresponse and finalizes the result, then reports the result to the\njob manager.", "id": "f578:c1:m3"} {"signature": "def run_worker_pool(job_handler, host=\"\", port=,*, max_workers=None):", "body": "if max_workers is None:max_workers = multiprocessing.cpu_count()processes = []for _ in range(max_workers):p = multiprocessing.Process(target=worker_main,args=(job_handler, host, port))p.start()processes.append(p)logger.debug(\"\")for p in processes:p.join()logger.debug(\"\")", "docstring": "Runs a pool of workers which connect to a remote HighFive master and begin\nexecuting calls.", "id": "f579:m2"} {"signature": "async def handle_jobs(job_handler, host, port, *, loop):", "body": "try:try:reader, writer = await asyncio.open_connection(host, port, loop=loop)except OSError:logging.error(\"\")returnwhile True:try:call_encoded = await reader.readuntil(b\"\")except (asyncio.IncompleteReadError, ConnectionResetError):breaklogging.debug(\"\")call_json = call_encoded.decode(\"\")call = json.loads(call_json)response = job_handler(call)response_json = json.dumps(response) + \"\"response_encoded = response_json.encode(\"\")writer.write(response_encoded)logging.debug(\"\")except KeyboardInterrupt:pass", "docstring": "Connects to the remote master and continuously receives calls, executes\nthem, then returns a response until interrupted.", "id": "f579:m0"} {"signature": "def setUp(self):", "body": "self.options = {'': {'': '','': ,'': '','': }}self.accounting = Accounting(self.options)", "docstring": "test up method.\n\nReturns:\n none (NoneType): None", "id": "f583:c1:m0"} {"signature": "@taskdef push(msg):", "body": "clean()local(\"\".format(msg))local(\"\")", "docstring": "Push to github.\n\n Args:\n msg (str, required): Description", "id": "f584:m2"} {"signature": "@taskdef publish(msg=\"\"):", "body": "test = check()if test.succeeded:sdist = local(\"\")if sdist.succeeded:build = local('')if build.succeeded:upload = local(\"\")if upload.succeeded:tag()", "docstring": "Deploy the app to PYPI.\n\n Args:\n msg (str, optional): Description", "id": "f584:m3"} {"signature": "@taskdef check():", "body": "test = local(\"\"\"\")", "docstring": "Test project.", "id": "f584:m4"} {"signature": "def to_fixed(self, value, precision):", "body": "precision = self._change_precision(precision, self.settings[''][''])power = pow(, precision)power = round(self.parse(value) * power) / powerreturn ''.format(value, precision, precision)", "docstring": "Implementation that treats floats more like decimals.\n\n Fixes binary rounding issues (eg. 
(0.615).toFixed(2) === \"0.61\")\n that present problems for accounting and finance-related software.", "id": "f585:c0:m4"} {"signature": "def format(self, number, **kwargs):", "body": "if check_type(number, ''):return map(lambda val: self.format(val, **kwargs))number = self.parse(number)if check_type(kwargs, ''):options = (self.settings[''].update(kwargs))precision = self._change_precision(options[''])negative = (lambda num: \"\" if num < else \"\")(number)base = str(int(self.to_fixed(abs(number) or , precision)), )mod = (lambda num: len(num) % if len(num) > else )(base)num = negative + (lambda num: base[:num] if num else '')(mod)num += re.sub('', '' +options[''], base[mod:])num += (lambda val: options[''] + self.to_fixed(abs(number), precision).split('')[] if val else '')(precision)return num", "docstring": "Format a given number.\n\n Format a number, with comma-separated thousands and\n custom precision/decimal places\n\n Localise by overriding the precision and thousand / decimal separators\n 2nd parameter `precision` can be an object matching `settings.number`\n\n Args:\n number (TYPE): Description\n precision (TYPE): Description\n thousand (TYPE): Description\n decimal (TYPE): Description\n\n Returns:\n name (TYPE): Description", "id": "f585:c0:m5"} {"signature": "def parse(self, value, decimal=None):", "body": "value = value or if check_type(value, ''):return map(lambda val: self.parse(val, decimal))if check_type(value, '') or check_type(value, ''):return valuedecimal = decimal or self.settings.number.decimalregex = re.compile(\"\" + decimal + \"\")unformatted = str(value)unformatted = re.sub('', \"\", unformatted)unformatted = re.sub(regex, '', unformatted)unformatted = unformatted.replace('', decimal)formatted = (lambda val: unformatted if val else )(is_num(unformatted))return formatted", "docstring": "Summary.\n\n Takes a string/array of strings, removes all formatting/cruft and\n returns the raw float value\n\n Decimal must be included in the regular expression to match floats\n (defaults to Accounting.settings.number.decimal),\n so if the number uses a non-standard decimal\n separator, provide it as the second argument.\n *\n Also matches bracketed negatives (eg. \"$ (1.99)\" => -1.99)\n\n Doesn't throw any errors (`None`s become 0) but this may change\n\nArgs:\n value (TYPE): Description\n decimal (TYPE): Description\n\nReturns:\n name (TYPE): Description", "id": "f585:c0:m3"} {"signature": "def _change_precision(self, val, base=):", "body": "if not isinstance(val, int):raise TypeError('')val = round(abs(val))val = (lambda num: base if is_num(num) else num)(val)return val", "docstring": "Check and normalise the value of precision (must be positive integer).\n\nArgs:\n val (INT): must be positive integer\n base (INT): Description\n\nReturns:\n VAL (INT): Description", "id": "f585:c0:m2"} {"signature": "def get_field_language(real_field):", "body": "return real_field.split('')[-]", "docstring": "return language for a field. i.e. returns \"en\" for \"name_en", "id": "f591:m2"} {"signature": "def canonical_fieldname(db_field):", "body": "return getattr(db_field, '', db_field.name)", "docstring": "all \"description_en\", \"description_fr\", etc. 
field names will return \"description", "id": "f591:m6"} {"signature": "def get_table_fields(self, db_table):", "body": "db_table_desc = self.introspection.get_table_description(self.cursor, db_table)return [t[] for t in db_table_desc]", "docstring": "get table fields from schema", "id": "f592:c0:m1"} {"signature": "def get_sync_sql(self, field_name, db_change_langs, model, db_table_fields):", "body": "qn = connection.ops.quote_namestyle = no_style()sql_output = []db_table = model._meta.db_tablewas_translatable_before = self.was_translatable_before(field_name, db_table_fields)default_f = self.get_default_field(field_name, model)default_f_required = default_f and self.get_field_required_in_db(db_table,default_f.name,value_not_implemented=False)for lang in db_change_langs:new_field = get_real_fieldname(field_name, lang)try:f = model._meta.get_field(new_field)col_type = self.get_type_of_db_field(field_name, model)field_column = f.columnexcept FieldDoesNotExist: field_column = new_fieldcol_type = self.get_type_of_db_field(field_name, model)field_sql = [style.SQL_FIELD(qn(field_column)), style.SQL_COLTYPE(col_type)]alter_colum_set = '' % qn(field_column)if default_f:alter_colum_drop = '' % qn(field_column)not_null = style.SQL_KEYWORD('')if '' in backend.__name__:alter_colum_set = '' % (qn(field_column), col_type)not_null = style.SQL_KEYWORD('')if default_f:alter_colum_drop = '' % (qn(field_column), col_type)if not new_field in db_table_fields:sql_output.append(\"\" % (qn(db_table), ''.join(field_sql)))if lang == self.default_lang and not was_translatable_before:sql_output.append(\"\" % (qn(db_table),qn(field_column), qn(field_name)))if not f.null:sql_output.append(\"\" %(qn(db_table), alter_colum_set,style.SQL_KEYWORD('')))elif default_f and not default_f.null:if lang == self.default_lang:f_required = self.get_field_required_in_db(db_table,field_column,value_not_implemented=False)if default_f.name == new_field and default_f_required:continueif not f_required:sql_output.append((\"\"\"\" % {'': qn(db_table),'': qn(field_column),'': self.get_value_default(),'': style.SQL_KEYWORD(''),}))sql_output.append(\"\" %(qn(db_table), alter_colum_set,style.SQL_KEYWORD('')))else:f_required = self.get_field_required_in_db(db_table,field_column,value_not_implemented=True)if f_required:sql_output.append((\"\" % (qn(db_table), alter_colum_drop, not_null)))if not was_translatable_before:sql_output.append(\"\" % (qn(db_table), qn(field_name)))return sql_output", "docstring": "returns SQL needed for sync schema for a new translatable field", "id": "f592:c0:m8"} {"signature": "def get_db_change_languages(self, field_name, db_table_fields):", "body": "for lang_code, lang_name in get_languages():if get_real_fieldname(field_name, lang_code) not in db_table_fields:yield lang_codefor db_table_field in db_table_fields:pattern = re.compile('' % field_name)m = pattern.match(db_table_field)if not m:continuelang = m.group('')yield lang", "docstring": "get only db changes fields", "id": "f592:c0:m3"} {"signature": "def count(self):", "body": "return self.conn.client.hlen(self.nodelist_key)", "docstring": ":rtype: int\n:returns: The number of nodes in the nodelist", "id": "f597:c0:m8"} {"signature": "def key(self, *args):", "body": "return \"\".join([str(a) for a in args])", "docstring": "Concatenates a list of arguments to provide a (hopefully) unique key to set in the cache.\n\n:param args: A list of ordered, serializable, values.\n:return: A period-delimited string concatenation of the input arguments in order.", "id": 
"f599:c0:m0"} {"signature": "def lock(self):", "body": "return phonon.lock.Lock(self.resource_key)", "docstring": "Locks the resource managed by this reference.", "id": "f600:c0:m1"} {"signature": "def mutable(obj):", "body": "base_cls = type(obj)class Proxy(base_cls):def __getattribute__(self, name):try:return super().__getattribute__(name)except AttributeError:return getattr(obj, name)update_wrapper(Proxy, base_cls, updated = ())return Proxy()", "docstring": "return a mutable proxy for the `obj`.\n\nall modify on the proxy will not apply on origin object.", "id": "f633:m0"} {"signature": "def prop(func=None, *,field = _UNSET,get: bool = True, set: bool = True, del_: bool = False,default = _UNSET,types: tuple = _UNSET):", "body": "def wrap(func):if not callable(func):raise TypeErrorprop_name = func.__name__key = fieldif key is _UNSET:key = '' + prop_namefget, fset, fdel = None, None, Noneif get:def fget(self):try:return self.__dict__[key]except KeyError:if default is not _UNSET:return defaultraise AttributeError(f\"\")if set:def fset(self, val):if types is not _UNSET and not isinstance(val, types):if isinstance(types, tuple):types_name = tuple(x.__name__ for x in types)else:types_name = types.__name__raise TypeError(f''f'')self.__dict__[key] = valif del_:def fdel(self):del self.__dict__[key]return property(fget, fset, fdel, func.__doc__)return wrap(func) if func else wrap", "docstring": "`prop` is a sugar for `property`.\n\n``` py\n@prop\ndef value(self):\n pass\n\n# equals:\n\n@property\ndef value(self):\n return self._value\n\n@value.setter\ndef value(self, val):\n self._value = val\n```", "id": "f634:m0"} {"signature": "def get_only(field):", "body": "return property(lambda self: getattr(self, field))", "docstring": "`get_only` is a sugar for `property`.\n\n``` py\nvalue = get_only('_value')\n\n# equals:\n\n@property\ndef value(self):\n return getattr(self, '_value')\n```", "id": "f634:m1"} {"signature": "def dispose_at_exit(exitable):", "body": "@atexit.registerdef callback():exitable.__exit__(*sys.exc_info())return exitable", "docstring": "register `exitable.__exit__()` into `atexit` module.\n\nreturn the `exitable` itself.", "id": "f635:m0"} {"signature": "def object_to_primitive(obj):", "body": "if obj is None:return objif isinstance(obj, (int, float, bool, str)):return objif isinstance(obj, (list, frozenset, set)):return [object_to_primitive(x) for x in obj]if isinstance(obj, dict):return dict([(object_to_primitive(k), object_to_primitive(v)) for k, v in obj.items()])data = vars(obj)assert isinstance(data, dict)return object_to_primitive(data)", "docstring": "convert object to primitive type so we can serialize it to data format like python.\n\nall primitive types: dict, list, int, float, bool, str, None", "id": "f640:m0"} {"signature": "def incr(self, value=):", "body": "return self.do(lambda x: x + value)", "docstring": "return new value.", "id": "f643:c1:m1"} {"signature": "def semaphore(count: int, bounded: bool=False):", "body": "lock_type = threading.BoundedSemaphore if bounded else threading.Semaphorelock_obj = lock_type(value=count)return with_it(lock_obj)", "docstring": "use `Semaphore` to keep func access thread-safety.\n\nexample:\n\n``` py\n@semaphore(3)\ndef func(): pass\n```", "id": "f644:m2"} {"signature": "def unwrap(self):", "body": "return self._obj", "docstring": "get the origin object.", "id": "f649:c1:m1"} {"signature": "def __eq__(self, other):", "body": "return self._comparer.eq(self.get_cmpvalue(), other.get_cmpvalue())", "docstring": "note: user should 
ensure other is an instance of Wrap.", "id": "f649:c1:m4"} {"signature": "def freeze(self):", "body": "setattr(self, Freezable.__FREEZABLE_FLAG, True)", "docstring": "freeze object.", "id": "f651:c0:m0"} {"signature": "def raise_if_freezed(self):", "body": "if self.is_freezed:name = type(self).__name__raise InvalidOperationException(''.format(name=name))", "docstring": "raise `InvalidOperationException` if the object is frozen.", "id": "f651:c0:m2"} {"signature": "@propertydef has_value(self):", "body": "return self._value is not None", "docstring": "get whether a value is present or not.", "id": "f659:c0:m1"} {"signature": "def cache(descriptor=None, *, store: IStore = None):", "body": "if descriptor is None:return functools.partial(cache, store=store)hasattrs = {'': hasattr(descriptor, ''),'': hasattr(descriptor, ''),'': hasattr(descriptor, '')}descriptor_name = get_descriptor_name(descriptor)class CacheDescriptor(ICacheDescriptor):def __init__(self):if descriptor_name is not None:self.__name__ = descriptor_namecache_descriptor = CacheDescriptor()if store is None:store = FieldStore(cache_descriptor)elif not isinstance(store, IStore):raise TypeError(f'')if hasattrs['']:def get(self, obj, objtype):if obj is None:return descriptor.__get__(obj, objtype)value = store.get(self, obj, defval=NOVALUE)if value is NOVALUE:value = descriptor.__get__(obj, objtype)store.set(self, obj, value)return valueCacheDescriptor.__get__ = getif hasattrs['']:def set(self, obj, value):store.pop(self, obj)descriptor.__set__(obj, value)CacheDescriptor.__set__ = setif hasattrs['']:def delete(self, obj):store.pop(self, obj)descriptor.__delete__(obj)CacheDescriptor.__delete__ = deletereturn cache_descriptor", "docstring": "usage:\n\n``` py\n@cache\n@property\ndef name(self): pass\n```", "id": "f661:m1"} {"signature": "@abstractmethoddef get(self, descriptor, obj, *, defval=NOVALUE) -> object:", "body": "raise NotImplementedError", "docstring": "get value.", "id": "f662:c0:m1"} {"signature": "def __init__(self, store_dict: dict=None):", "body": "if store_dict is None:store_dict = WeakKeyDictionary()self._data = store_dict", "docstring": "if `store_dict` is `None`, use the default dict: `WeakKeyDictionary`.", "id": "f662:c2:m0"} {"signature": "@propertydef name(self):", "body": "return self._name", "docstring": "The name of the parameter. Can be null.", "id": "f663:c1:m1"} {"signature": "@yielderdef flag_housenumber(token):", "body": "if token.is_first and token.isdigit():token.kind = ''return token", "docstring": "Very basic housenumber flagging. Make your own for your specific needs.\n Eg. 
in addok-france:\n https://github.com/addok/addok-france/blob/master/addok_france/utils.py#L106", "id": "f686:m13"} {"signature": "def _tokenize(text):", "body": "return PATTERN.findall(text)", "docstring": "Split text into a list of tokens.", "id": "f686:m0"} {"signature": "def iter_pipe(pipe, processors):", "body": "if isinstance(pipe, str):pipe = [pipe]for it in processors:pipe = it(pipe)yield from pipe", "docstring": "Allow for iterators to return either an item or an iterator of items.", "id": "f687:m3"} {"signature": "def pair(cmd, word):", "body": "word = list(preprocess_query(word))[]key = pair_key(word)tokens = [t.decode() for t in DB.smembers(key)]tokens.sort()print(white(tokens))print(magenta(''.format(len(tokens))))", "docstring": "See all token associated with a given token.\n PAIR lilas", "id": "f699:m1"} {"signature": "@specdef register_command(subparsers):", "body": "", "docstring": "Register command for Addok CLI.", "id": "f700:m8"} {"signature": "@specdef configure(config):", "body": "", "docstring": "Configure addok by patching config object after user local config.", "id": "f700:m7"} {"signature": "def do_STRDISTANCE(self, s):", "body": "s = s.split('')if not len(s) == :print(red(''))returnone, two = sprint(white(compare_str(one, two)))", "docstring": "Print the distance score between two strings. Use |\u00a0as separator.\n STRDISTANCE rue des lilas|porte des lilas", "id": "f701:c0:m35"} {"signature": "def do_GEOHASHTOGEOJSON(self, geoh):", "body": "geoh, with_neighbors = self._match_option('', geoh)bbox = geohash.bbox(geoh)try:with_neighbors = int(with_neighbors)except TypeError:with_neighbors = def expand(bbox, geoh, depth):neighbors = geohash.neighbors(geoh)for neighbor in neighbors:other = geohash.bbox(neighbor)if with_neighbors > depth:expand(bbox, neighbor, depth + )else:if other[''] > bbox['']:bbox[''] = other['']if other[''] < bbox['']:bbox[''] = other['']if other[''] > bbox['']:bbox[''] = other['']if other[''] < bbox['']:bbox[''] = other['']if with_neighbors > :expand(bbox, geoh, )geojson = {\"\": \"\",\"\": [[[bbox[''], bbox['']],[bbox[''], bbox['']],[bbox[''], bbox['']],[bbox[''], bbox['']],[bbox[''], bbox['']]]]}print(white(json.dumps(geojson)))", "docstring": "Build GeoJSON corresponding to geohash given as parameter.\n GEOHASHTOGEOJSON u09vej04 [NEIGHBORS 0|1|2]", "id": "f701:c0:m25"} {"signature": "def do_GEOHASHMEMBERS(self, geoh):", "body": "geoh, with_neighbors = self._match_option('', geoh)key = compute_geohash_key(geoh, with_neighbors != '')if key:for id_ in DB.smembers(key):r = Result(id_)print(''.format(white(r), blue(r._id)))", "docstring": "Return members of a geohash and its neighbors.\n GEOHASHMEMBERS u09vej04 [NEIGHBORS 0]", "id": "f701:c0:m27"} {"signature": "def do_GEOHASH(self, latlon):", "body": "try:lat, lon = map(float, latlon.split())except ValueError:print(red(''.format(latlon)))else:print(white(geohash.encode(lat, lon, config.GEOHASH_PRECISION)))", "docstring": "Compute a geohash from latitude and longitude.\n GEOHASH 48.1234 2.9876", "id": "f701:c0:m26"} {"signature": "def do_REVERSE(self, latlon):", "body": "lat, lon = latlon.split()for r in reverse(float(lat), float(lon)):print(''.format(white(r), blue(r.score),blue(r.distance), blue(r._id)))", "docstring": "Do a reverse search. 
Args: lat lon.\n REVERSE 48.1234 2.9876", "id": "f701:c0:m33"} {"signature": "@classmethoddef from_id(self, _id):", "body": "return Result(dbkeys.document_key(_id))", "docstring": "Return a result from it's document _id.", "id": "f705:c0:m13"} {"signature": "def do_fuzzyindex(self, word):", "body": "word = list(preprocess_query(word))[]token = Token(word)neighbors = make_fuzzy(token)neighbors = [(n, DB.zcard(dbkeys.token_key(n))) for n in neighbors]neighbors.sort(key=lambda n: n[], reverse=True)for token, freq in neighbors:if freq == :breakprint(white(token), blue(freq))", "docstring": "Compute fuzzy extensions of word that exist in index.\n FUZZYINDEX lilas", "id": "f707:m4"} {"signature": "def contains_components(self, address, components):", "body": "expected = len(components)got = parsed = parse_address(address)self.assertTrue(parsed)for s, c in parsed:if components.get(c, None) == s:got += self.assertEqual(expected, got)", "docstring": "Test whether address parse contains specific components.", "id": "f714:c0:m0"} {"signature": "def have_expansion_in_common(self, str1, str2, **kw):", "body": "expansions1 = expand_address(str1, **kw)expansions2 = expand_address(str2, **kw)self.assertTrue(set(expansions1) & set(expansions2))", "docstring": "Test whether strings have at least one shared expansion.", "id": "f715:c0:m1"} {"signature": "def validate_fragment(self, fragment=None, fragment_dict=None):", "body": "fragment_dict = fragment_dict if fragment_dict else fragment.to_dict()assert fragment_dict[''] == TEST_HTMLassert fragment_dict[''] == TEST_JS_INIT_FNassert fragment_dict[''] == EXPECTED_JS_INIT_VERSIONassert fragment_dict[''] == TEST_JSON_INIT_ARGSassert fragment_dict[''] == EXPECTED_RESOURCES", "docstring": "Validates that the fields of a fragment are all correct.", "id": "f721:c0:m2"} {"signature": "@abstractmethoddef render_to_fragment(self, request, **kwargs): ", "body": "raise NotImplementedError()", "docstring": "Render this view to a fragment.", "id": "f722:c0:m3"} {"signature": "@staticmethoddef resource_to_html(resource):", "body": "if resource.mimetype == \"\":if resource.kind == \"\":return u\"\" % resource.dataelif resource.kind == \"\":return u\"\" % resource.dataelse:raise Exception(\"\" % resource.kind)elif resource.mimetype == \"\":if resource.kind == \"\":return u\"\" % resource.dataelif resource.kind == \"\":return u\"\" % resource.dataelse:raise Exception(\"\" % resource.kind)elif resource.mimetype == \"\":assert resource.kind == \"\"return resource.dataelse:raise Exception(\"\" % resource.mimetype)", "docstring": "Returns `resource` wrapped in the appropriate html tag for it's mimetype.", "id": "f723:c0:m19"} {"signature": "def add_content(self, content):", "body": "assert isinstance(content, six.text_type)self.content += content", "docstring": "Add content to this fragment.\n\n`content` is a Unicode string, HTML to append to the body of the\nfragment. 
It must not contain a ```` tag, or otherwise assume\nthat it is the only content on the page.", "id": "f723:c0:m4"} {"signature": "def resources_to_html(self, placement):", "body": "return ''.join(self.resource_to_html(resource)for resource in self.resourcesif resource.placement == placement)", "docstring": "Get some resource HTML for this Fragment.\n\n`placement` is \"head\" or \"foot\".\n\nReturns a unicode string, the HTML for the head or foot of the page.", "id": "f723:c0:m18"} {"signature": "def add_resources(self, fragments):", "body": "for fragment in fragments:self.add_fragment_resources(fragment)", "docstring": "Add all the resources from `fragments` to my resources.\n\nThis is used to aggregate resources from a sequence of fragments that\nshould be considered part of the current fragment.\n\nThe content from the Fragments is ignored. The caller must collect\ntogether the content into this Fragment's content.", "id": "f723:c0:m13"} {"signature": "def to_dict(self):", "body": "return {'': self.content,'': [r._asdict() for r in self.resources], '': self.js_init_fn,'': self.js_init_version,'': self.json_init_args}", "docstring": "Returns the fragment in a dictionary representation.", "id": "f723:c0:m2"} {"signature": "def WritingBloomFilter(num_elements, max_fp_prob, filename=None,ignore_case=False, want_lock=False,fdatasync_on_close=True):", "body": "new_filter = _hydra.BloomFilter.getFilter(num_elements, max_fp_prob,filename=filename, ignore_case=ignore_case,read_only=False, want_lock=want_lock,fdatasync_on_close=fdatasync_on_close)if filename:with open(''.format(filename), '') as descriptor:descriptor.write(\"\".format(num_elements))descriptor.write(\"\".format(max_fp_prob))descriptor.write(\"\".format(ignore_case))return new_filter", "docstring": "Create a read/write bloom filter with an upperbound of\n(num_elements, max_fp_prob) as a specification and using filename\nas the backing datastore.", "id": "f753:m2"} {"signature": "def getlist(self, section, option, *, raw=False, vars=None,fallback=None):", "body": "val = self.get(section, option, raw=raw, vars=vars, fallback=fallback)values = []if val:for line in val.split(\"\"):values += [s.strip() for s in line.split(\"\")]return values", "docstring": "Return the [section] option values as a list.\n The list items must be delimited with commas and/or newlines.", "id": "f755:c0:m1"} {"signature": "def copytree(src, dst, symlinks=True):", "body": "from shutil import copy2, Error, copystatnames = os.listdir(src)if not Path(dst).exists():os.makedirs(dst)errors = []for name in names:srcname = os.path.join(src, name)dstname = os.path.join(dst, name)try:if symlinks and os.path.islink(srcname):linkto = os.readlink(srcname)os.symlink(linkto, dstname)elif os.path.isdir(srcname):copytree(srcname, dstname, symlinks)else:copy2(srcname, dstname)except OSError as why:errors.append((srcname, dstname, str(why)))except Error as err:errors.extend(err.args[])try:copystat(src, dst)except OSError as why:if why.winerror is None:errors.extend((src, dst, str(why)))if errors:raise Error(errors)", "docstring": "Modified from shutil.copytree docs code sample, merges files rather than\nrequiring dst to not exist.", "id": "f756:m1"} {"signature": "def debugger():", "body": "e, m, tb = sys.exc_info()if tb is not None:_debugger.post_mortem(tb)else:_debugger.set_trace()", "docstring": "If called in the context of an exception, calls post_mortem; otherwise\n set_trace.\n ``ipdb`` is preferred over ``pdb`` if installed.", "id": "f756:m3"} {"signature": 
"@staticmethoddef DEFAULT_LOGGING_CONFIG(level=logging.WARN, format=LOG_FORMAT):", "body": "return {\"\": ,\"\": {\"\": {\"\": format},\"\": {\"\": \"\"},},\"\": {\"\": {\"\": \"\",\"\": \"\",\"\": \"\",\"\": \"\",},},\"\": {\"\": level,\"\": [\"\"],},\"\": {},}", "docstring": "Returns a default logging config in dict format.\n\n Compatible with logging.config.dictConfig(), this default set the root\n logger to `level` with `sys.stdout` console handler using a formatter\n initialized with `format`. A simple 'brief' formatter is defined that\n shows only the message portion any log entries.", "id": "f770:c4:m0"} {"signature": "def load_key(pubkey):", "body": "try:return load_pem_public_key(pubkey.encode(), default_backend())except ValueError:pubkey = pubkey.replace('', '').replace('', '')return load_pem_public_key(pubkey.encode(), default_backend())", "docstring": "Load public RSA key, with work-around for keys using\n incorrect header/footer format.\n\n Read more about RSA encryption with cryptography:\n https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/", "id": "f789:m0"} {"signature": "def fibonacci(self):", "body": "return ", "docstring": "The fifth num in the fibonacci sequence.", "id": "f802:c0:m3"} {"signature": "def famous_five(self):", "body": "return ['', '', '', '', '']", "docstring": "The Famous Five is the name of a series of children's\n adventure novels by English author Enid Blyton.", "id": "f802:c0:m96"} {"signature": "def flipside(self):", "body": "return ''", "docstring": "A five on the flipside should be upside down.", "id": "f802:c0:m22"} {"signature": "def item_turbo(self, item):", "body": "return self.item_description(item)", "docstring": "This can be overridden to set turbo contents.\n\n :param item:\n\n :rtype: str|unicode", "id": "f810:c1:m4"} {"signature": "def configure_analytics_yandex(self, ident, params=None):", "body": "params = params or {}data = {'': '','': ident,}if params:data[''] = '' % paramsself.analytics.append(data)", "docstring": "Configure Yandex Metrika analytics counter.\n\n :param str|unicode ident: Metrika counter ID.\n\n :param dict params: Additional params.", "id": "f810:c1:m2"} {"signature": "def configure_ad_yandex(self, ident, turbo_id=''):", "body": "self.ads.append({'': '','': ident,'': turbo_id,})", "docstring": "Configure Yandex Advertisement Network.\n\n :param str|unicode ident: Ad ID.\n\n :param str|unicode turbo_id: ID of a place (figure) on Turbo page where to put an Ad block.", "id": "f810:c1:m1"} {"signature": "def get_version():", "body": "contents = read_file(os.path.join('', ''))version = re.search('', contents)version = version.group().replace('', '').strip()return version", "docstring": "Returns version number, without module import (which can lead to ImportError\n if some dependencies are unavailable before install.", "id": "f811:m1"} {"signature": "def get_task_patterns(self):", "body": "raise NotImplementedError", "docstring": "\\\n Like everything else, a bunch of two-tuples containing a regex to match\n and a callback that takes arguments from the regex", "id": "f817:c0:m1"} {"signature": "def worker_ping_handler(self, nick, message, channel):", "body": "return '' % platform.node()", "docstring": "\\\n Respond to pings sent periodically by the BotnetBot", "id": "f817:c0:m9"} {"signature": "def register_with_boss(self):", "body": "gevent.sleep() while not self.registered.is_set():self.respond('' % platform.node(), nick=self.boss)gevent.sleep()", "docstring": "\\\n Register the worker with the boss", "id": 
"f817:c0:m2"} {"signature": "def done(self, nick):", "body": "self.finished.add(nick)", "docstring": "\\\n Indicate that the worker with the given nick has finished this task", "id": "f819:c1:m2"} {"signature": "def dispatch_patterns(self):", "body": "return ((self.nick_re, self.new_nick),(self.nick_change_re, self.handle_nick_change),(self.ping_re, self.handle_ping),(self.part_re, self.handle_part),(self.join_re, self.handle_join),(self.quit_re, self.handle_quit),(self.chanmsg_re, self.handle_channel_message),(self.privmsg_re, self.handle_private_message),(self.registered_re, self.handle_registered),)", "docstring": "\\\n Low-level dispatching of socket data based on regex matching, in general\n handles\n\n * In event a nickname is taken, registers under a different one\n * Responds to periodic PING messages from server\n * Dispatches to registered callbacks when\n - any user leaves or enters a room currently connected to\n - a channel message is observed\n - a private message is received", "id": "f828:c0:m11"} {"signature": "def send(self, data, force=False):", "body": "if self._registered or force:self._sock_file.write('' % data)self._sock_file.flush()else:self._out_buffer.append(data)", "docstring": "\\\n Send raw data over the wire if connection is registered. Otherewise,\n save the data to an output buffer for transmission later on.\n If the force flag is true, always send data, regardless of\n registration status.", "id": "f828:c0:m2"} {"signature": "def register_callbacks(self, callbacks):", "body": "self._callbacks.extend(callbacks)", "docstring": "\\\n Hook for registering custom callbacks for dispatch patterns", "id": "f828:c0:m12"} {"signature": "def respond(self, message, channel=None, nick=None):", "body": "self.conn.respond(message, channel, nick)", "docstring": "\\\n Wraps the connection object's respond() method", "id": "f828:c1:m7"} {"signature": "def run_bot(bot_class, host, port, nick, channels=None, ssl=None):", "body": "conn = IRCConnection(host, port, nick, ssl)bot_instance = bot_class(conn)while :if not conn.connect():breakchannels = channels or []for channel in channels:conn.join(channel)conn.enter_event_loop()", "docstring": "\\\n Convenience function to start a bot on the given network, optionally joining\n some channels", "id": "f828:m0"} {"signature": "def enter_event_loop(self):", "body": "patterns = self.dispatch_patterns()self.logger.debug('')while :try:data = self._sock_file.readline()except socket.error:data = Noneif not data:self.logger.info('')self.close()return Truedata = data.rstrip()for pattern, callback in patterns:match = pattern.match(data)if match:callback(**match.groupdict())", "docstring": "\\\n Main loop of the IRCConnection - reads from the socket and dispatches\n based on regex matching", "id": "f828:c0:m23"} {"signature": "def register_callbacks(self):", "body": "self.conn.register_callbacks(((re.compile(pattern), callback)for pattern, callback in self.command_patterns()))", "docstring": "\\\n Hook for registering callbacks with connection -- handled by __init__()", "id": "f828:c1:m1"} {"signature": "def identify_unit_framework(target_unit):", "body": "if HAS_ASTROPY:from astropy.units import UnitBaseif isinstance(target_unit, UnitBase):return ASTROPYif HAS_PINT:from pint.unit import UnitsContainerif hasattr(target_unit, '') and isinstance(target_unit.dimensionality, UnitsContainer):return PINTif HAS_QUANTITIES:from quantities.unitquantity import IrreducibleUnitfrom quantities import Quantityif isinstance(target_unit, IrreducibleUnit) or 
isinstance(target_unit, Quantity):return QUANTITIESraise TraitError(\"\".format(type(target_unit).__name__))", "docstring": "Identify whether the user is requesting unit validation against\nastropy.units, pint, or quantities.", "id": "f832:m0"} {"signature": "@stellar.command()@click.argument('')@click.argument('')def rename(old_name, new_name):", "body": "app = get_app()snapshot = app.get_snapshot(old_name)if not snapshot:click.echo(\"\" % old_name)sys.exit()new_snapshot = app.get_snapshot(new_name)if new_snapshot:click.echo(\"\" % new_name)sys.exit()app.rename_snapshot(snapshot, new_name)click.echo(\"\" % (old_name, new_name))", "docstring": "Renames a snapshot", "id": "f841:m9"} {"signature": "@stellar.command()def version():", "body": "click.echo(\"\" % __version__)", "docstring": "Shows version number", "id": "f841:m3"} {"signature": "@status_loggerdef write_indexes(self, table):", "body": "index_sql = super(PostgresDbWriter, self).write_indexes(table)for sql in index_sql:self.execute(sql)", "docstring": "Send DDL to create the specified `table` indexes\n\n :Parameters:\n - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.\n\n Returns None", "id": "f858:c0:m9"} {"signature": "def close(self):", "body": "self.conn.close()", "docstring": "Closes connection to the PostgreSQL server", "id": "f858:c0:m5"} {"signature": "@status_loggerdef write_table(self, table):", "body": "table_sql, serial_key_sql = super(PostgresDbWriter, self).write_table(table)for sql in serial_key_sql + table_sql:self.execute(sql)", "docstring": "Send DDL to create the specified `table`\n\n :Parameters:\n - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.\n\n Returns None", "id": "f858:c0:m8"} {"signature": "def get_search_results(self, request, queryset, search_term):", "body": "search_term = search_term.replace('', '')return super().get_search_results(request, queryset, search_term)", "docstring": "Allow searching by hyphenated uuid.", "id": "f876:c3:m0"} {"signature": "def parse_notification_xml(xml: str) -> Union[AliasRegistration, Payment]:", "body": "body = fromstring(xml).find('')transaction = body.find('')_user_parameters = transaction.find('')def get_named_parameter(name):return _user_parameters.find(\"\" + name + \"\")def success():return transaction.get('') == ''def parse_success():computed_signature = sign_web(body.get(''), transaction.find('').text,transaction.find('').text,transaction.find('').text)sign2 = get_named_parameter('').textif computed_signature != sign2:raise ValueError('')success = transaction.find('')d = dict(response_code=success.find('').text,response_message=success.find('').text,authorization_code=success.find('').text,acquirer_authorization_code=success.find('').text,)return {k: v for k, v in d.items() if v is not None}def parse_error():error = transaction.find('')d = dict(error_code=error.find('').text,error_message=error.find('').text,error_detail=error.find('').text)acquirer_error_code = get_named_parameter('')if acquirer_error_code is not None:d[''] = acquirer_error_code.textreturn {k: v for k, v in d.items() if v is not None}def parse_common_attributes():d = dict(transaction_id=transaction.find('').text,merchant_id=body.get(''),client_ref=transaction.get(''),amount=parse_money(transaction))payment_method = transaction.find('')if payment_method is not None:d[''] = payment_method.textrequest_type = 
transaction.find('')if request_type is not None:d[''] = request_type.textcredit_card_country = get_named_parameter('')if credit_card_country is not None:d[''] = credit_card_country.textexpiry_month = get_named_parameter('')if expiry_month is not None:d[''] = int(expiry_month.text)expiry_year = get_named_parameter('')if expiry_year is not None:d[''] = int(expiry_year.text)return duse_alias_parameter = get_named_parameter('')if use_alias_parameter is not None and use_alias_parameter.text == '':d = dict(parse_common_attributes())masked_card_number = get_named_parameter('')if masked_card_number is not None:d[''] = masked_card_number.textcard_alias = get_named_parameter('')if card_alias is not None:d[''] = card_alias.textif success():d[''] = Trued.update(parse_success())else:d[''] = Falsed.update(parse_error())return AliasRegistration(**d)else:if success():d = dict(success=True)cardno = get_named_parameter('')if cardno is not None:d[''] = cardno.textd.update(parse_common_attributes())d.update(parse_success())return Payment(**d)else:d = dict(success=False)d.update(parse_common_attributes())d.update(parse_error())return Payment(**d)", "docstring": "Both alias registration and payments are received here.\nWe can differentiate them by looking at the use-alias user-parameter (and verifying the amount is o).", "id": "f878:m1"} {"signature": "def write(self, fp, options=None):", "body": "output = self.render(options)if hasattr(output, ''):output.save(fp, format=self.writer.format)else:fp.write(output)", "docstring": "Renders the barcode and writes it to the file like object\n `fp`.\n\n :parameters:\n fp : File like object\n Object to write the raw data in.\n options : Dict\n The same as in `self.render`.", "id": "f911:c0:m5"} {"signature": "def set_options(self, options):", "body": "for key, val in options.items():key = key.lstrip('')if hasattr(self, key):setattr(self, key, val)", "docstring": "Sets the given options as instance attributes (only\n if they are known).\n\n :parameters:\n options : Dict\n All known instance attributes and more if the childclass\n has defined them before this call.\n\n :rtype: None", "id": "f919:c0:m4"} {"signature": "def to_ascii(self):", "body": "code = self.build()for i, line in enumerate(code):code[i] = line.replace('', '').replace('', '')return ''.join(code)", "docstring": "Returns an ascii representation of the barcode.\n\n :rtype: String", "id": "f920:c0:m5"} {"signature": "@abc.abstractmethod@asyncio.coroutinedef abort(self):", "body": "", "docstring": "Abort the authentication. The result is either the failure tuple\n(``(SASLState.FAILURE, None)``) or a :class:`SASLFailure` exception if\nthe response from the peer did not indicate abortion (e.g. another\nerror was returned by the peer or the peer indicated success).", "id": "f929:c4:m2"} {"signature": "@abc.abstractmethod@asyncio.coroutinedef initiate(self, mechanism, payload=None):", "body": "", "docstring": "Send a SASL initiation request for the given `mechanism`. Depending on\nthe `mechanism`, an initial `payload` *may* be given. 
The `payload` is\nthen a :class:`bytes` object which needs to be passed as initial\npayload during the initiation request.\n\nWait for a reply by the peer and return the reply as a next-state tuple\nin the format documented at :class:`SASLInterface`.", "id": "f929:c4:m0"} {"signature": "@asyncio.coroutine@abc.abstractmethoddef authenticate(self, sm, token):", "body": "", "docstring": "Execute the mechanism identified by `token` (the non-:data:`None` value\nwhich has been returned by :meth:`any_supported` before) using the\ngiven :class:`SASLStateMachine` `sm`.\n\nIf authentication fails, an appropriate exception is raised\n(:class:`AuthenticationFailure`). If the authentication fails for a\nreason unrelated to credentials, :class:`SASLFailure` is raised.", "id": "f929:c6:m1"} {"signature": "def check_bidi(chars):", "body": "if not chars:returnhas_RandALCat = any(is_RandALCat(c) for c in chars)if not has_RandALCat:returnhas_LCat = any(is_LCat(c) for c in chars)if has_LCat:raise ValueError(\"\"\"\")if not is_RandALCat(chars[]) or not is_RandALCat(chars[-]):raise ValueError(\"\")", "docstring": "Check proper bidirectionality as per stringprep. Operates on a list of\nunicode characters provided in `chars`.", "id": "f930:m4"} {"signature": "def saslprep(string, allow_unassigned=False):", "body": "chars = list(string)_saslprep_do_mapping(chars)do_normalization(chars)check_prohibited_output(chars,(stringprep.in_table_c12,stringprep.in_table_c21,stringprep.in_table_c22,stringprep.in_table_c3,stringprep.in_table_c4,stringprep.in_table_c5,stringprep.in_table_c6,stringprep.in_table_c7,stringprep.in_table_c8,stringprep.in_table_c9))check_bidi(chars)if not allow_unassigned:check_unassigned(chars,(stringprep.in_table_a1,))return \"\".join(chars)", "docstring": "Process the given `string` using the SASLprep profile. In the error cases\ndefined in `RFC 3454`_ (stringprep), a :class:`ValueError` is raised.", "id": "f930:m8"} {"signature": "def check_prohibited_output(chars, bad_tables):", "body": "violator = check_against_tables(chars, bad_tables)if violator is not None:raise ValueError(\"\"\"\".format(ord(violator)))", "docstring": "Check against prohibited output, by checking whether any of the characters\nfrom `chars` are in any of the `bad_tables`.\n\nOperates in-place on a list of code points from `chars`.", "id": "f930:m5"} {"signature": "def trace(string):", "body": "check_prohibited_output(string,(stringprep.in_table_c21,stringprep.in_table_c22,stringprep.in_table_c3,stringprep.in_table_c4,stringprep.in_table_c5,stringprep.in_table_c6,stringprep.in_table_c8,stringprep.in_table_c9,))check_bidi(string)return string", "docstring": "Implement the ``trace`` profile specified in :rfc:`4505`.", "id": "f930:m9"} {"signature": "def check_against_tables(chars, tables):", "body": "for c in chars:if any(in_table(c) for in_table in tables):return creturn None", "docstring": "Perform a check against the table predicates in `tables`. `tables` must be\na reusable iterable containing characteristic functions of character sets,\nthat is, functions which return :data:`True` if the character is in the\ntable.\n\nThe function returns the first character occuring in any of the tables or\n:data:`None` if no character matches.", "id": "f930:m2"} {"signature": "def _saslprep_do_mapping(chars):", "body": "i = while i < len(chars):c = chars[i]if stringprep.in_table_c12(c):chars[i] = \"\"elif stringprep.in_table_b1(c):del chars[i]continuei += ", "docstring": "Perform the stringprep mapping step of SASLprep. 
Operates in-place on a\nlist of unicode characters provided in `chars`.", "id": "f930:m7"} {"signature": "def check_unassigned(chars, bad_tables):", "body": "bad_tables = (stringprep.in_table_a1,)violator = check_against_tables(chars, bad_tables)if violator is not None:raise ValueError(\"\"\"\".format(ord(violator)))", "docstring": "Check that `chars` does not contain any unassigned code points as per\nthe given list of `bad_tables`.\n\nOperates on a list of unicode code points provided in `chars`.", "id": "f930:m6"} {"signature": "def __init__(self, geometry_string, options={}, *args, **kwargs):", "body": "self.geometry_string = geometry_stringself.options = optionssuper(HyperlinkedSorlImageField, self).__init__(*args, **kwargs)", "docstring": "Create an instance of the HyperlinkedSorlImageField image serializer.\n\nArgs:\n geometry_string (str): The size of your cropped image.\n options (Optional[dict]): A dict of sorl options.\n *args: (Optional) Default serializers.ImageField arguments.\n **kwargs: (Optional) Default serializers.ImageField keyword\n arguments.\n\nFor a description of sorl geometry strings and additional sorl options,\nplease see https://sorl-thumbnail.readthedocs.org/en/latest/examples.html?highlight=geometry#low-level-api-examples", "id": "f945:c0:m0"} {"signature": "def is_bare_exception(self, node):", "body": "return isinstance(node, Name) and node.id in self.current_except_names", "docstring": "Checks if the node is a bare exception name from an except block.", "id": "f947:c0:m16"} {"signature": "def visit_JoinedStr(self, node):", "body": "if version_info >= (, ):if self.within_logging_statement():if any(isinstance(i, FormattedValue) for i in node.values):if self.within_logging_argument():self.violations.append((node, FSTRING_VIOLATION))super(LoggingVisitor, self).generic_visit(node)", "docstring": "Process f-string arguments.", "id": "f947:c0:m7"} {"signature": "def visit_keyword(self, node):", "body": "if self.should_check_whitelist(node):if node.arg not in self.whitelist and not node.arg.startswith(\"\"):self.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(node.arg)))if self.should_check_extra_exception(node):self.check_exception_arg(node.value)super(LoggingVisitor, self).generic_visit(node)", "docstring": "Process keyword arguments.", "id": "f947:c0:m8"} {"signature": "def visit_Dict(self, node):", "body": "if self.should_check_whitelist(node):for key in node.keys:if key.s in self.whitelist or key.s.startswith(\"\"):continueself.violations.append((self.current_logging_call, WHITELIST_VIOLATION.format(key.s)))if self.should_check_extra_exception(node):for value in node.values:self.check_exception_arg(value)super(LoggingVisitor, self).generic_visit(node)", "docstring": "Process dict arguments.", "id": "f947:c0:m6"} {"signature": "def check_exc_info(self, node):", "body": "if self.current_logging_level not in ('', ''):returnfor kw in node.keywords:if kw.arg == '':if self.current_logging_level == '':violation = ERROR_EXC_INFO_VIOLATIONelse:violation = REDUNDANT_EXC_INFO_VIOLATIONself.violations.append((node, violation))", "docstring": "Reports a violation if exc_info keyword is used with logging.error or logging.exception.", "id": "f947:c0:m19"} {"signature": "def visit_Call(self, node):", "body": "if self.within_logging_statement():if self.within_logging_argument() and self.is_format_call(node):self.violations.append((node, STRING_FORMAT_VIOLATION))super(LoggingVisitor, self).generic_visit(node)returnlogging_level = 
self.detect_logging_level(node)if logging_level and self.current_logging_level is None:self.current_logging_level = logging_levelif logging_level is None:super(LoggingVisitor, self).generic_visit(node)returnself.current_logging_call = nodeif logging_level == \"\":self.violations.append((node, WARN_VIOLATION))self.check_exc_info(node)for index, child in enumerate(iter_child_nodes(node)):if index == :self.current_logging_argument = childif index >= :self.check_exception_arg(child)if index > and isinstance(child, keyword) and child.arg == \"\":self.current_extra_keyword = childsuper(LoggingVisitor, self).visit(child)self.current_logging_argument = Noneself.current_extra_keyword = Noneself.current_logging_call = Noneself.current_logging_level = None", "docstring": "Visit a function call.\n\nWe expect every logging statement and string format to be a function call.", "id": "f947:c0:m4"} {"signature": "def is_str_exception(self, node):", "body": "return (isinstance(node, Call)and isinstance(node.func, Name)and node.func.id in ('', '')and node.argsand self.is_bare_exception(node.args[]))", "docstring": "Checks if the node is the expression str(e) or unicode(e), where e is an exception name from an except block", "id": "f947:c0:m17"} {"signature": "def detect_logging_level(self, node):", "body": "try:if self.get_id_attr(node.func.value) == \"\":return Noneif node.func.attr in LOGGING_LEVELS:return node.func.attrexcept AttributeError:passreturn None", "docstring": "Heuristic to decide whether an AST Call is a logging call.", "id": "f947:c0:m10"} {"signature": "def getplan(self, size=\"\", axes=None, padding=None):", "body": "from numpy import dtype as gettypeplan = self.vshapeif axes is None:if isinstance(size, str):axes = arange(len(self.vshape))else:axes = arange(len(size))else:axes = asarray(axes, '')pad = array(len(self.vshape)*[, ])if padding is not None:pad[axes] = paddingif isinstance(size, tuple):plan[axes] = sizeelif isinstance(size, str):size = * float(size)elsize = gettype(self.dtype).itemsizenelements = prod(self.vshape)dims = self.vshape[self.vmask(axes)]if size <= elsize:s = ones(len(axes))else:remsize = * nelements * elsizes = []for (i, d) in enumerate(dims):minsize = remsize/dif minsize >= size:s.append()remsize = minsizecontinueelse:s.append(min(d, floor(size/minsize)))s[i+:] = plan[i+:]breakplan[axes] = selse:raise ValueError(\"\")return plan, pad", "docstring": "Identify a plan for chunking values along each dimension.\n\nGenerates an ndarray with the size (in number of elements) of chunks\nin each dimension. 
If provided, will estimate chunks for only a\nsubset of axes, leaving all others to the full size of the axis.\n\nParameters\n----------\nsize : string or tuple\n If str, the average size (in KB) of the chunks in all value dimensions.\n If int/tuple, an explicit specification of the number chunks in\n each moving value dimension.\n\naxes : tuple, optional, default=None\n One or more axes to estimate chunks for, if provided any\n other axes will use one chunk.\n\npadding : tuple or int, option, default=None\n Size over overlapping padding between chunks in each dimension.\n If tuple, specifies padding along each chunked dimension; if int,\n all dimensions use same padding; if None, no padding", "id": "f979:c0:m20"} {"signature": "def cache(self):", "body": "self._rdd.cache()", "docstring": "Cache the underlying RDD in memory.", "id": "f979:c0:m26"} {"signature": "def unpersist(self):", "body": "self._rdd.unpersist()", "docstring": "Remove the underlying RDD from memory.", "id": "f979:c0:m27"} {"signature": "def map_generic(self, func):", "body": "def process_record(val):newval = empty(, dtype=\"\")newval[] = func(val)return newvalrdd = self._rdd.mapValues(process_record)nchunks = self.getnumber(self.plan, self.vshape)newshape = tuple([int(s) for s in r_[self.kshape, nchunks]])newsplit = len(self.shape)return BoltArraySpark(rdd, shape=newshape, split=newsplit, ordered=self._ordered, dtype=\"\")", "docstring": "Apply a generic array -> object to each subarray\n\nThe resulting object is a BoltArraySpark of dtype object where the\nblocked dimensions are replaced with indices indication block ID.", "id": "f979:c0:m19"} {"signature": "@staticmethoddef ones(shape, context=None, axis=(,), dtype=float64, npartitions=None):", "body": "from numpy import onesreturn ConstructSpark._wrap(ones, shape, context, axis, dtype, npartitions)", "docstring": "Create a spark bolt array of ones.\n\nParameters\n----------\nshape : tuple\n The desired shape of the array.\n\ncontext : SparkContext\n A context running Spark. (see pyspark)\n\naxis : tuple, optional, default=(0,)\n Which axes to distribute the array along. The resulting\n distributed object will use keys to represent these axes,\n with the remaining axes represented by values.\n\ndtype : data-type, optional, default=float64\n The desired data-type for the array. If None, will\n be determined from the data. (see numpy)\n\nnpartitions : int\n Number of partitions for parallization.\n\nReturns\n-------\nBoltArraySpark", "id": "f980:c0:m1"} {"signature": "def _reshapebasic(self, shape):", "body": "new = tupleize(shape)old_key_size = prod(self.keys.shape)old_value_size = prod(self.values.shape)for i in range(len(new)):new_key_size = prod(new[:i])new_value_size = prod(new[i:])if new_key_size == old_key_size and new_value_size == old_value_size:return ireturn -", "docstring": "Check if the requested reshape can be broken into independant reshapes\non the keys and values. 
If it can, returns the index in the new shape\nseparating keys from values, otherwise returns -1", "id": "f985:c0:m30"} {"signature": "def chunk(self, size=\"\", axis=None, padding=None):", "body": "if type(size) is not str:size = tupleize((size))axis = tupleize((axis))padding = tupleize((padding))from bolt.spark.chunk import ChunkedArraychnk = ChunkedArray(rdd=self._rdd, shape=self._shape, split=self._split, dtype=self._dtype)return chnk._chunk(size, axis, padding)", "docstring": "Chunks records of a distributed array.\n\nChunking breaks arrays into subarrays, using a specified\nsize of chunks along each value dimension. Can alternatively\nspecify an average chunk byte size (in kilobytes) and the size of\nchunks (as ints) will be computed automatically.\n\nParameters\n----------\nsize : tuple, int, or str, optional, default = \"150\"\n A string giving the size in kilobytes, or a tuple with the size\n of chunks along each dimension.\n\naxis : int or tuple, optional, default = None\n One or more axes to chunk array along, if None\n will use all axes.\n\npadding: tuple or int, default = None\n Number of elements per dimension that will overlap with the adjacent chunk.\n If a tuple, specifies padding along each chunked dimension; if an int, same\n padding will be applied to all chunked dimensions.\n\nReturns\n-------\nChunkedArray", "id": "f985:c0:m24"} {"signature": "def max(self, axis=None, keepdims=False):", "body": "from numpy import maximumreturn self._stat(axis, func=maximum, keepdims=keepdims)", "docstring": "Return the maximum of the array over the given axis.\n\nParameters\n----------\naxis : tuple or int, optional, default=None\n Axis to compute statistic over, if None\n will compute over all axes\n\nkeepdims : boolean, optional, default=False\n Keep axis remaining after operation with size 1.", "id": "f985:c0:m17"} {"signature": "def std(self, axis=None, keepdims=False):", "body": "return self._stat(axis, name='', keepdims=keepdims)", "docstring": "Return the standard deviation of the array over the given axis.\n\nParameters\n----------\naxis : tuple or int, optional, default=None\n Axis to compute statistic over, if None\n will compute over all axes\n\nkeepdims : boolean, optional, default=False\n Keep axis remaining after operation with size 1.", "id": "f985:c0:m15"} {"signature": "def transpose(self, *axes):", "body": "if len(axes) == :p = arange(self.ndim-, -, -)else:p = asarray(argpack(axes))istransposeable(p, range(self.ndim))split = self.splitnew_keys, new_values = p[:split], p[split:]swapping_keys = sort(new_values[new_values < split])swapping_values = sort(new_keys[new_keys >= split])stationary_keys = sort(new_keys[new_keys < split])stationary_values = sort(new_values[new_values >= split])p_swap = r_[stationary_keys, swapping_values, swapping_keys, stationary_values]p_swap_inv = argsort(p_swap)p_x = p_swap_inv[p]p_keys, p_values = p_x[:split], p_x[split:]-splitarr = self.swap(swapping_keys, swapping_values-split)arr = arr.keys.transpose(tuple(p_keys.tolist()))arr = arr.values.transpose(tuple(p_values.tolist()))return arr", "docstring": "Return an array with the axes transposed.\n\nThis operation will incur a swap unless the\ndesired permutation can be obtained\nonly by transposing the keys or the values.\n\nParameters\n----------\naxes : None, tuple of ints, or n ints\n If None, will reverse axis order.", "id": "f985:c0:m26"} {"signature": "def _getbasic(self, index):", "body": "key_slices = index[:self.split]value_slices = index[self.split:]def key_check(key):def 
inrange(k, s):if s.step > :return s.start <= k < s.stopelse:return s.stop < k <= s.startdef check(k, s):return inrange(k, s) and mod(k - s.start, s.step) == out = [check(k, s) for k, s in zip(key, key_slices)]return all(out)def key_func(key):return tuple([(k - s.start)/s.step for k, s in zip(key, key_slices)])filtered = self._rdd.filter(lambda kv: key_check(kv[]))if self._split == self.ndim:rdd = filtered.map(lambda kv: (key_func(kv[]), kv[]))else:value_slices = [s if s.stop != - else slice(s.start, None, s.step) for s in value_slices]rdd = filtered.map(lambda kv: (key_func(kv[]), kv[][value_slices]))shape = tuple([int(ceil((s.stop - s.start) / float(s.step))) for s in index])split = self.splitreturn rdd, shape, split", "docstring": "Basic indexing (for slices or ints).", "id": "f985:c0:m20"} {"signature": "def squeeze(self, axis=None):", "body": "if not any([d == for d in self.shape]):return selfif axis is None:drop = where(asarray(self.shape) == )[]elif isinstance(axis, int):drop = asarray((axis,))elif isinstance(axis, tuple):drop = asarray(axis)else:raise ValueError(\"\")if any([self.shape[i] > for i in drop]):raise ValueError(\"\")if any(asarray(drop) < self.split):kmask = set([d for d in drop if d < self.split])kfunc = lambda k: tuple([kk for ii, kk in enumerate(k) if ii not in kmask])else:kfunc = lambda k: kif any(asarray(drop) >= self.split):vmask = tuple([d - self.split for d in drop if d >= self.split])vfunc = lambda v: v.squeeze(vmask)else:vfunc = lambda v: vrdd = self._rdd.map(lambda kv: (kfunc(kv[]), vfunc(kv[])))shape = tuple([ss for ii, ss in enumerate(self.shape) if ii not in drop])split = len([d for d in range(self.keys.ndim) if d not in drop])return self._constructor(rdd, shape=shape, split=split).__finalize__(self)", "docstring": "Remove one or more single-dimensional axes from the array.\n\nParameters\n----------\naxis : tuple or int\n One or more singleton axes to remove.", "id": "f985:c0:m31"} {"signature": "def tordd(self):", "body": "return self._rdd", "docstring": "Return the underlying RDD of the bolt array.", "id": "f985:c0:m44"} {"signature": "def _align(self, axis):", "body": "inshape(self.shape, axis)tokeys = [(a - self.split) for a in axis if a >= self.split]tovalues = [a for a in range(self.split) if a not in axis]if tokeys or tovalues:return self.swap(tovalues, tokeys)else:return self", "docstring": "Align spark bolt array so that axes for iteration are in the keys.\n\nThis operation is applied before most functional operators.\nIt ensures that the specified axes are valid, and swaps\nkey/value axes so that functional operators can be applied\nover the correct records.\n\nParameters\n----------\naxis: tuple[int]\n One or more axes that wil be iterated over by a functional operator\n\nReturns\n-------\nBoltArraySpark", "id": "f985:c0:m7"} {"signature": "@propertydef split(self):", "body": "return self._split", "docstring": "Axis at which the array is split into keys/values.", "id": "f985:c0:m37"} {"signature": "def cache(self):", "body": "self._rdd.cache()", "docstring": "Cache the underlying RDD in memory.", "id": "f985:c0:m3"} {"signature": "def swapaxes(self, axis1, axis2):", "body": "raise NotImplementedError", "docstring": "Return an array with two axes interchanged.", "id": "f986:c0:m18"} {"signature": "@propertydef ndim(self):", "body": "raise NotImplementedError", "docstring": "Number of dimensions.", "id": "f986:c0:m4"} {"signature": "def min(self, axis):", "body": "raise NotImplementedError", "docstring": "Return the minimum of the array elements 
over the given axis or axes.", "id": "f986:c0:m11"} {"signature": "@propertydef size(self):", "body": "raise NotImplementedError", "docstring": "Total number of elements.", "id": "f986:c0:m3"} {"signature": "def max(self, axis):", "body": "raise NotImplementedError", "docstring": "Return the maximum of the array elements over the given axis or axes.", "id": "f986:c0:m12"} {"signature": "@propertydef T(self):", "body": "raise NotImplementedError", "docstring": "Transpose by reversing the order of the axes.", "id": "f986:c0:m15"} {"signature": "@staticmethoddef concatenate(arrays, axis=):", "body": "if not isinstance(arrays, tuple):raise ValueError(\"\")arrays = tuple([asarray(a) for a in arrays])from numpy import concatenatereturn BoltArrayLocal(concatenate(arrays, axis))", "docstring": "Join a sequence of arrays together.\n\nParameters\n----------\narrays : tuple\n A sequence of array-like e.g. (a1, a2, ...)\n\naxis : int, optional, default=0\n The axis along which the arrays will be joined.\n\nReturns\n-------\nBoltArrayLocal", "id": "f987:c0:m4"} {"signature": "def toscalar(self):", "body": "if self.shape == ():return self.toarray().reshape()[]else:return self", "docstring": "Returns the single scalar element contained in an array of shape (), if\nthe array has that shape. Returns self otherwise.", "id": "f988:c0:m10"} {"signature": "def map(self, func, axis=(,)):", "body": "axes = sorted(tupleize(axis))key_shape = [self.shape[axis] for axis in axes]reshaped = self._align(axes, key_shape=key_shape)mapped = asarray(list(map(func, reshaped)))elem_shape = mapped[].shapelinearized_shape_inv = key_shape + list(elem_shape)reordered = mapped.reshape(*linearized_shape_inv)return self._constructor(reordered)", "docstring": "Apply a function across an axis.\n\nArray will be aligned so that the desired set of axes\nare in the keys, which may require a transpose/reshape.\n\nParameters\n----------\nfunc : function\n Function of a single array to apply\n\naxis : tuple or int, optional, default=(0,)\n Axis or multiple axes to apply function along.\n\nReturns\n-------\nBoltArrayLocal", "id": "f988:c0:m6"} {"signature": "def reduce(self, func, axis=):", "body": "axes = sorted(tupleize(axis))if isinstance(func, ufunc):inshape(self.shape, axes)reduced = func.reduce(self, axis=tuple(axes))else:reshaped = self._align(axes)reduced = reduce(func, reshaped)new_array = self._constructor(reduced)expected_shape = [self.shape[i] for i in range(len(self.shape)) if i not in axes]if new_array.shape != tuple(expected_shape):raise ValueError(\"\")return new_array", "docstring": "Reduce an array along an axis.\n\nApplies an associative/commutative function of two arguments\ncumulatively to all arrays along an axis. 
Array will be aligned\nso that the desired set of axes are in the keys, which may\nrequire a transpose/reshape.\n\nParameters\n----------\nfunc : function\n Function of two arrays that returns a single array\n\naxis : tuple or int, optional, default=(0,)\n Axis or multiple axes to reduce along.\n\nReturns\n-------\nBoltArrayLocal", "id": "f988:c0:m7"} {"signature": "def tospark(self, sc, axis=):", "body": "from bolt import arrayreturn array(self.toarray(), sc, axis=axis)", "docstring": "Converts a BoltArrayLocal into a BoltArraySpark\n\nParameters\n----------\nsc : SparkContext\n The SparkContext which will be used to create the BoltArraySpark\n\naxis : tuple or int, optional, default=0\n The axis (or axes) across which this array will be parallelized\n\nReturns\n-------\nBoltArraySpark", "id": "f988:c0:m11"} {"signature": "def tordd(self, sc, axis=):", "body": "from bolt import arrayreturn array(self.toarray(), sc, axis=axis).tordd()", "docstring": "Converts a BoltArrayLocal into an RDD\n\nParameters\n----------\nsc : SparkContext\n The SparkContext which will be used to create the BoltArraySpark\n\naxis : tuple or int, optional, default=0\n The axis (or axes) across which this array will be parallelized\n\nReturns\n-------\nRDD[(tuple, ndarray)]", "id": "f988:c0:m12"} {"signature": "def iterexpand(arry, extra):", "body": "for d in range(arry.ndim, arry.ndim+extra):arry = expand_dims(arry, axis=d)return arry", "docstring": "Expand dimensions by iteratively appending empty axes.\n\nParameters\n----------\narry : ndarray\n The original array\n\nextra : int\n The number of empty axes to append", "id": "f990:m10"} {"signature": "def allstack(vals, depth=):", "body": "if type(vals[]) is ndarray:return concatenate(vals, axis=depth)else:return concatenate([allstack(x, depth+) for x in vals], axis=depth)", "docstring": "If an ndarray has been split into multiple chunks by splitting it along\neach axis at a number of locations, this function rebuilds the\noriginal array from chunks.\n\nParameters\n----------\nvals : nested lists of ndarrays\n each level of nesting of the lists representing a dimension of\n the original array.", "id": "f990:m9"} {"signature": "def isreshapeable(new, old):", "body": "new, old = tupleize(new), tupleize(old)if not prod(new) == prod(old):raise ValueError(\"\")", "docstring": "Check to see if a proposed tuple of axes is a valid reshaping of\nthe old axes by ensuring that they can be factored.\n\nParameters\n----------\nnew : tuple\n tuple of proposed axes\n\nold : tuple\n tuple of old axes", "id": "f990:m8"} {"signature": "@wrappeddef zeros(*args, **kwargs):", "body": "return lookup(*args, **kwargs).dispatch('', *args, **kwargs)", "docstring": "Create a bolt array of zeros.", "id": "f991:m4"} {"signature": "@wrappeddef array(*args, **kwargs):", "body": "return lookup(*args, **kwargs).dispatch('', *args, **kwargs)", "docstring": "Create a bolt array.", "id": "f991:m2"} {"signature": "def filter_suite(arr, b):", "body": "import randomrandom.seed()filtered = b.filter(lambda x: False)res = filtered.toarray()assert res.shape == (,)filtered = b.filter(lambda x: True)res = filtered.toarray()assert res.shape == b.shapedef filter_half(x):random.seed(x.tostring())return random.random()filtered = b.filter(lambda x: filter_half(x) < )res = filtered.toarray()assert res.shape[:] == b.shape[:]assert res.shape[] <= b.shape[]if not b.mode == \"\":filtered = b.filter(lambda x: filter_half(x) < , sort=True)res = filtered.toarray()assert res.shape[:] == b.shape[:]assert res.shape[] <= 
b.shape[]filtered = b.filter(lambda x: filter_half(x) < , axis=)res = filtered.toarray()assert res.shape[] <= b.shape[]assert res.shape[] == b.shape[]assert res.shape[] == b.shape[]filtered = b.filter(lambda x: filter_half(x) < , axis=)res = filtered.toarray()assert res.shape[] <= b.shape[]assert res.shape[] == b.shape[]assert res.shape[] == b.shape[]filtered = b.filter(lambda x: False, axis=(, ))res = filtered.toarray()assert res.shape == (,)filtered = b.filter(lambda x: True)res = filtered.toarray()assert res.shape == b.shapefiltered = b.filter(lambda x: filter_half(x) < , axis=(, ))res = filtered.toarray()assert res.shape[] <= b.shape[]*b.shape[]assert res.shape[] == b.shape[]", "docstring": "A set of tests for the filter operator\n\nParameters\n----------\narr: `ndarray`\n A 3D ndarray used in the construction of `b` (used to check results)\nb: `BoltArray`\n The BoltArray to be used for testing", "id": "f992:m2"} {"signature": "def apply_argument_parser(argumentsParser, options=None):", "body": "if options is not None:args = argumentsParser.parse_args(options)else:args = argumentsParser.parse_args()return args", "docstring": "Apply the argument parser.", "id": "f1013:m3"} {"signature": "def chunks(l, n):", "body": "for i in range(, len(l), n):yield l[i:i + n]", "docstring": "Yield successive n-sized chunks from l.", "id": "f1015:m0"} {"signature": "@_convert_f_to_seqs()def floyd(seqs, f=None, start=None, key=lambda x: x):", "body": "tortise, hare = seqsyield hare.next()tortise_value = tortise.next()hare_value = hare.next()while hare_value != tortise_value:yield hare_valueyield hare.next()hare_value = hare.next()tortise_value = tortise.next()if f is None:raise CycleDetected()hare_value = f(hare_value)first = tortise_value = startwhile key(tortise_value) != key(hare_value):tortise_value = f(tortise_value)hare_value = f(hare_value)first += period = hare_value = f(tortise_value)while key(tortise_value) != key(hare_value):hare_value = f(hare_value)period += raise CycleDetected(period=period, first=first)", "docstring": "Floyd's Cycle Detector.\n\n See help(cycle_detector) for more context.\n\n Args:\n\n *args: Two iterators issuing the exact same sequence:\n -or-\n f, start: Function and starting state for finite state machine\n\n Yields: \n\n Values yielded by sequence_a if it terminates, undefined if a\n cycle is found.\n\n Raises:\n\n CycleDetected if a cycle is found; if called with f and `start`,\n the parameters `first` and `period` will be defined indicating\n the offset of the start of the cycle and the cycle's period.", "id": "f1019:m3"} {"signature": "@classmethoddef start(cls, now, number, firstweekday=calendar.SATURDAY, **options):", "body": "week = cls.mask(now, firstweekday=firstweekday, **options)days = (number - ) * cls.DAYS_IN_WEEKreturn week - timedelta(days=days)", "docstring": "Return the starting datetime: ``number`` of weeks before ``now``.\n\n``firstweekday`` determines when the week starts. 
It defaults\nto Saturday.", "id": "f1026:c6:m0"} {"signature": "@classmethoddef mask(cls, dt, **options):", "body": "return dt.replace(minute=, second=, microsecond=)", "docstring": "Return a datetime with the same value as ``dt``, to a\nresolution of hours.", "id": "f1026:c4:m0"} {"signature": "@classmethoddef mask(cls, dt, **options):", "body": "return dt.replace(month=, day=,hour=, minute=, second=, microsecond=)", "docstring": "Return a datetime with the same value as ``dt``, to a\nresolution of years.", "id": "f1026:c8:m1"} {"signature": "@classmethoddef mask(cls, dt, **options):", "body": "return dt.replace(hour=, minute=, second=, microsecond=)", "docstring": "Return a datetime with the same value as ``dt``, to a\nresolution of days.", "id": "f1026:c5:m0"} {"signature": "@classmethoddef mask(cls, dt, **options):", "body": "raise NotImplemented", "docstring": "Return a datetime with the same value as ``dt``, keeping only\nsignificant values.", "id": "f1026:c1:m0"} {"signature": "def dates_to_keep(dates,years=, months=, weeks=, days=, firstweekday=SATURDAY,now=None):", "body": "datetimes = to_keep((datetime.combine(d, time()) for d in dates),years=years, months=months, weeks=weeks, days=days,hours=, minutes=, seconds=,firstweekday=firstweekday, now=now)return set(dt.date() for dt in datetimes)", "docstring": "Return a set of dates that should be kept, out of ``dates``.\n\nSee ``to_keep`` for a description of arguments.", "id": "f1027:m2"} {"signature": "def _w_long(x: int) -> bytes:", "body": "return (int(x) & ).to_bytes(, \"\")", "docstring": "Convert a 32-bit integer to little-endian.", "id": "f1055:m1"} {"signature": "def _exec_module(self,fullname: str,loader_state: Mapping[str, str],path_stats: Mapping[str, int],module: types.ModuleType,):", "body": "filename = loader_state[\"\"]cache_filename = loader_state[\"\"]with timed(lambda duration: logger.debug(f\"\")):all_bytecode = []def add_bytecode(bytecode: types.CodeType):all_bytecode.append(bytecode)logger.debug(f\"\")forms = reader.read_file(filename, resolver=runtime.resolve_alias)compiler.compile_module( forms,compiler.CompilerContext(filename=filename),module,collect_bytecode=add_bytecode,)cache_file_bytes = _basilisp_bytecode(path_stats[\"\"], path_stats[\"\"], all_bytecode)self._cache_bytecode(filename, cache_filename, cache_file_bytes)", "docstring": "Load and execute a non-cached Basilisp module.", "id": "f1055:c0:m10"} {"signature": "def _r_long(int_bytes: bytes) -> int:", "body": "return int.from_bytes(int_bytes, \"\")", "docstring": "Convert 4 bytes in little-endian to an integer.", "id": "f1055:m0"} {"signature": "def genname(prefix: str) -> str:", "body": "i = next_name_id()return f\"\"", "docstring": "Generate a unique function name with the given prefix.", "id": "f1056:m4"} {"signature": "def munge(s: str, allow_builtins: bool = False) -> str:", "body": "new_str = []for c in s:new_str.append(_MUNGE_REPLACEMENTS.get(c, c))new_s = \"\".join(new_str)if keyword.iskeyword(new_s):return f\"\"if not allow_builtins and new_s in builtins.__dict__:return f\"\"return new_s", "docstring": "Replace characters which are not valid in Python symbols\n with valid replacement strings.", "id": "f1056:m1"} {"signature": "def next_(o) -> Optional[ISeq]:", "body": "s = rest(o)if not s:return Nonereturn s", "docstring": "Calls rest on o. 
If o returns an empty sequence or None, returns None.\n Otherwise, returns the elements after the first in o.", "id": "f1057:m4"} {"signature": "def _with_attrs(**kwargs):", "body": "def decorator(f):for k, v in kwargs.items():setattr(f, k, v)return freturn decorator", "docstring": "Decorator to set attributes on a function. Returns the original\n function after setting the attributes named by the keyword arguments.", "id": "f1057:m43"} {"signature": "@classmethoddef __refer_all(cls, refers: lmap.Map, other_ns_interns: lmap.Map) -> lmap.Map:", "body": "final_refers = refersfor entry in other_ns_interns:s: sym.Symbol = entry.keyvar: Var = entry.valueif not var.is_private:final_refers = final_refers.assoc(s, var)return final_refers", "docstring": "Refer all _public_ interns from another namespace.", "id": "f1057:c2:m21"} {"signature": "def deref(o, timeout_s=None, timeout_val=None):", "body": "if isinstance(o, IDeref):return o.deref()elif isinstance(o, IBlockingDeref):return o.deref(timeout_s, timeout_val)raise TypeError(f\"\")", "docstring": "Dereference a Deref object and return its contents.\n\n If o is an object implementing IBlockingDeref and timeout_s and\n timeout_val are supplied, deref will wait at most timeout_s seconds,\n returning timeout_val if timeout_s seconds elapse and o has not\n returned.", "id": "f1057:m17"} {"signature": "def add_alias(self, alias: sym.Symbol, namespace: \"\") -> None:", "body": "self._aliases.swap(lambda m: m.assoc(alias, namespace))", "docstring": "Add a Symbol alias for the given Namespace.", "id": "f1057:c2:m12"} {"signature": "@staticmethoddef intern_unbound(ns: sym.Symbol, name: sym.Symbol, dynamic: bool = False, meta=None) -> \"\":", "body": "var_ns = Namespace.get_or_create(ns)return var_ns.intern(name, Var(var_ns, name, dynamic=dynamic, meta=meta))", "docstring": "Create a new unbound `Var` instance to the symbol `name` in namespace `ns`.", "id": "f1057:c1:m17"} {"signature": "@classmethoddef remove(cls, name: sym.Symbol) -> Optional[\"\"]:", "body": "while True:oldval: lmap.Map = cls._NAMESPACES.deref()ns: Optional[Namespace] = oldval.entry(name, None)newval = oldvalif ns is not None:newval = oldval.dissoc(name)if cls._NAMESPACES.compare_and_set(oldval, newval):return ns", "docstring": "Remove the namespace bound to the symbol `name` in the global\n namespace cache and return that namespace.\n Return None if the namespace did not exist in the cache.", "id": "f1057:c2:m27"} {"signature": "def add_import(self, sym: sym.Symbol, module: types.ModuleType, *aliases: sym.Symbol) -> None:", "body": "self._imports.swap(lambda m: m.assoc(sym, module))if aliases:self._import_aliases.swap(lambda m: m.assoc(*itertools.chain.from_iterable([(alias, sym) for alias in aliases])))", "docstring": "Add the Symbol as an imported Symbol in this Namespace. 
If aliases are given,\n the aliases will be applied to the module as well.", "id": "f1057:c2:m17"} {"signature": "@staticmethoddef find_in_ns(ns_sym: sym.Symbol, name_sym: sym.Symbol) -> \"\":", "body": "ns = Namespace.get(ns_sym)if ns:return ns.find(name_sym)return None", "docstring": "Return the value currently bound to the name `name_sym` in the namespace\n specified by `ns_sym`.", "id": "f1057:c1:m18"} {"signature": "def _collect_args(args) -> ISeq:", "body": "if isinstance(args, tuple):return llist.list(args)raise TypeError(\"\")", "docstring": "Collect Python starred arguments into a Basilisp list.", "id": "f1057:m41"} {"signature": "def rest(o) -> Optional[ISeq]:", "body": "if o is None:return Noneif isinstance(o, ISeq):s = o.restif s is None:return lseq.EMPTYreturn sn = to_seq(o)if n is None:return lseq.EMPTYreturn n.rest", "docstring": "If o is an ISeq, return the elements after the first in o. If o is None,\n returns an empty seq. Otherwise, coerces o to a seq and returns the rest.", "id": "f1057:m2"} {"signature": "@propertydef interns(self) -> VarMap:", "body": "return self._interns.deref()", "docstring": "A mapping between a symbolic name and a Var. The Var may point to\n code, data, or nothing, if it is unbound. Vars in `interns` are\n interned in _this_ namespace.", "id": "f1057:c2:m8"} {"signature": "def set_current_ns(ns_name: str,module: types.ModuleType = None,ns_var_name: str = NS_VAR_NAME,ns_var_ns: str = NS_VAR_NS,) -> Var:", "body": "symbol = sym.Symbol(ns_name)ns = Namespace.get_or_create(symbol, module=module)ns_var_sym = sym.Symbol(ns_var_name, ns=ns_var_ns)ns_var = Maybe(Var.find(ns_var_sym)).or_else_raise(lambda: RuntimeException(f\"\"))ns_var.push_bindings(ns)logger.debug(f\"\")return ns_var", "docstring": "Set the value of the dynamic variable `*ns*` in the current thread.", "id": "f1057:m47"} {"signature": "@staticmethoddef __get_or_create(ns_cache: NamespaceMap,name: sym.Symbol,module: types.ModuleType = None,core_ns_name=CORE_NS,) -> lmap.Map:", "body": "ns = ns_cache.entry(name, None)if ns is not None:return ns_cachenew_ns = Namespace(name, module=module)if name.name != core_ns_name:core_ns = ns_cache.entry(sym.symbol(core_ns_name), None)assert core_ns is not None, \"\"new_ns.refer_all(core_ns)return ns_cache.assoc(name, new_ns)", "docstring": "Private swap function used by `get_or_create` to atomically swap\n the new namespace map into the global cache.", "id": "f1057:c2:m24"} {"signature": "@staticmethoddef __completion_matcher(text: str) -> CompletionMatcher:", "body": "def is_match(entry: Tuple[sym.Symbol, Any]) -> bool:return entry[].name.startswith(text)return is_match", "docstring": "Return a function which matches any symbol keys from map entries\n against the given text.", "id": "f1057:c2:m28"} {"signature": "def partial(f, *args):", "body": "@functools.wraps(f)def partial_f(*inner_args):return f(*itertools.chain(args, inner_args))return partial_f", "docstring": "Return a function which is the partial application of f with args.", "id": "f1057:m16"} {"signature": "@propertydef aliases(self) -> NamespaceMap:", "body": "return self._aliases.deref()", "docstring": "A mapping between a symbolic alias and another Namespace. The\n fully qualified name of a namespace is also an alias for itself.", "id": "f1057:c2:m5"} {"signature": "def first(o):", "body": "if o is None:return Noneif isinstance(o, ISeq):return o.firsts = to_seq(o)if s is None:return Nonereturn s.first", "docstring": "If o is an ISeq, return the first element from o. If o is None, return\n None. 
Otherwise, coerces o to a Seq and returns the first.", "id": "f1057:m1"} {"signature": "def resolve_var(s: sym.Symbol, ns: Optional[Namespace] = None) -> Optional[Var]:", "body": "return Var.find(resolve_alias(s, ns))", "docstring": "Resolve the aliased symbol to a Var from the specified\n namespace, or the current namespace if none is specified.", "id": "f1057:m52"} {"signature": "@functools.singledispatchdef to_lisp(o, keywordize_keys: bool = True):", "body": "if not isinstance(o, (dict, frozenset, list, set, tuple)):return oelse: return _to_lisp_backup(o, keywordize_keys=keywordize_keys)", "docstring": "Recursively convert Python collections into Lisp collections.", "id": "f1057:m26"} {"signature": "def get_import(self, sym: sym.Symbol) -> Optional[types.ModuleType]:", "body": "mod = self.imports.entry(sym, None)if mod is None:alias = self.import_aliases.get(sym, None)if alias is None:return Nonereturn self.imports.entry(alias, None)return mod", "docstring": "Return the module if a moduled named by sym has been imported into\n this Namespace, None otherwise.\n\n First try to resolve a module directly with the given name. If no module\n can be resolved, attempt to resolve the module using import aliases.", "id": "f1057:c2:m18"} {"signature": "@staticmethoddef find_safe(ns_qualified_sym: sym.Symbol) -> \"\":", "body": "v = Var.find(ns_qualified_sym)if v is None:raise RuntimeException(f\"\")return v", "docstring": "Return the Var currently bound to the name in the namespace specified\n by `ns_qualified_sym`. If no Var is bound to that name, raise an exception.\n\n This is a utility method to return useful debugging information when code\n refers to an invalid symbol at runtime.", "id": "f1057:c1:m20"} {"signature": "def contains(coll, k):", "body": "if isinstance(coll, IAssociative):return coll.contains(k)return k in coll", "docstring": "Return true if o contains the key k.", "id": "f1057:m23"} {"signature": "def update(m, k, f, *args):", "body": "if m is None:return lmap.Map.empty().assoc(k, f(None, *args))if isinstance(m, IAssociative):old_v = m.entry(k)new_v = f(old_v, *args)return m.assoc(k, new_v)raise TypeError(f\"\")", "docstring": "Updates the value for key k in associative data structure m with the return value from\n calling f(old_v, *args). If m is None, use an empty map. If k is not in m, old_v will be\n None.", "id": "f1057:m14"} {"signature": "def get_alias(self, alias: sym.Symbol) -> \"\":", "body": "return self.aliases.entry(alias, None)", "docstring": "Get the Namespace aliased by Symbol or None if it does not exist.", "id": "f1057:c2:m13"} {"signature": "def sequence(s: Iterable) -> ISeq[Any]:", "body": "try:i = iter(s)return _Sequence(i, next(i))except StopIteration:return EMPTY", "docstring": "Create a Sequence from Iterable s.", "id": "f1060:m0"} {"signature": "def multifn(dispatch: DispatchFunction, default=None) -> MultiFunction[T]:", "body": "name = sym.symbol(dispatch.__qualname__, ns=dispatch.__module__)return MultiFunction(name, dispatch, default)", "docstring": "Decorator function which can be used to make Python multi functions.", "id": "f1061:m0"} {"signature": "@property@abstractmethoddef is_assignable(self) -> bool:", "body": "", "docstring": "True if this Node can be assigned in a set! 
form, False otherwise.", "id": "f1062:c2:m0"} {"signature": "@property@abstractmethoddef op(self) -> NodeOp:", "body": "", "docstring": "Enumerated keyword uniquely identifying this type of Node.\n\n The type and NodeOp should always be in sync.\n\n Having a simple enum value in addition to the type allows us to use\n switch-like syntax with Python dictionaries in compiler code, which\n is much faster than performing isinstance checks.", "id": "f1062:c1:m0"} {"signature": "def visit_Expr(self, node: ast.Expr) -> Optional[ast.Expr]:", "body": "if isinstance(node.value,(ast.Constant, ast.Name,ast.NameConstant,ast.Num,ast.Str,),):return Nonereturn node", "docstring": "Eliminate no-op constant expressions which are in the tree\n as standalone statements.", "id": "f1063:c0:m1"} {"signature": "def visit_While(self, node: ast.While) -> Optional[ast.AST]:", "body": "new_node = self.generic_visit(node)assert isinstance(new_node, ast.While)return ast.copy_location(ast.While(test=new_node.test,body=_filter_dead_code(new_node.body),orelse=_filter_dead_code(new_node.orelse),),new_node,)", "docstring": "Eliminate dead code from while bodies.", "id": "f1063:c0:m4"} {"signature": "def _with_loc(f: ParseFunction):", "body": "@wraps(f)def _parse_form(ctx: ParserContext, form: Union[LispForm, ISeq]) -> Node:form_loc = _loc(form)if form_loc is None:return f(ctx, form)else:return f(ctx, form).fix_missing_locations(form_loc)return _parse_form", "docstring": "Attach any available location information from the input form to\n the node environment returned from the parsing function.", "id": "f1064:m3"} {"signature": "def _assert_recur_is_tail(node: Node) -> None: ", "body": "if node.op == NodeOp.DO:assert isinstance(node, Do)for child in node.statements:_assert_no_recur(child)_assert_recur_is_tail(node.ret)elif node.op in {NodeOp.FN, NodeOp.FN_METHOD, NodeOp.METHOD}:assert isinstance(node, (Fn, FnMethod, Method))node.visit(_assert_recur_is_tail)elif node.op == NodeOp.IF:assert isinstance(node, If)_assert_no_recur(node.test)_assert_recur_is_tail(node.then)_assert_recur_is_tail(node.else_)elif node.op in {NodeOp.LET, NodeOp.LETFN}:assert isinstance(node, (Let, LetFn))for binding in node.bindings:assert binding.init is not None_assert_no_recur(binding.init)_assert_recur_is_tail(node.body)elif node.op == NodeOp.LOOP:assert isinstance(node, Loop)for binding in node.bindings:assert binding.init is not None_assert_no_recur(binding.init)elif node.op == NodeOp.RECUR:passelif node.op == NodeOp.TRY:assert isinstance(node, Try)_assert_recur_is_tail(node.body)for catch in node.catches:_assert_recur_is_tail(catch)if node.finally_:_assert_no_recur(node.finally_)else:node.visit(_assert_no_recur)", "docstring": "Assert that `recur` forms only appear in the tail position of this\n or child AST nodes.\n\n `recur` forms may only appear in `do` nodes (both literal and synthetic\n `do` nodes) and in either the :then or :else expression of an `if` node.", "id": "f1064:m25"} {"signature": "def parse_ast(ctx: ParserContext, form: ReaderForm) -> Node:", "body": "return _parse_ast(ctx, form).assoc(top_level=True)", "docstring": "Take a Lisp form as an argument and produce a Basilisp syntax\n tree matching the clojure.tools.analyzer AST spec.", "id": "f1064:m46"} {"signature": "def put_new_symbol( self,s: sym.Symbol,binding: Binding,warn_on_shadowed_name: bool = True,warn_on_shadowed_var: bool = True,warn_if_unused: bool = True,):", "body": "st = self.symbol_tableif warn_on_shadowed_name and self.warn_on_shadowed_name:if st.find_symbol(s) is 
not None:logger.warning(f\"\")if (warn_on_shadowed_name or warn_on_shadowed_var) and self.warn_on_shadowed_var:if self.current_ns.find(s) is not None:logger.warning(f\"\")if s.meta is not None and s.meta.entry(SYM_NO_WARN_WHEN_UNUSED_META_KEY, None):warn_if_unused = Falsest.new_symbol(s, binding, warn_if_unused=warn_if_unused)", "docstring": "Add a new symbol to the symbol table.\n\n This function allows individual warnings to be disabled for one run\n by supplying keyword arguments temporarily disabling those warnings.\n In certain cases, we do not want to issue warnings again for a\n previously checked case, so this is a simple way of disabling these\n warnings for those cases.\n\n If WARN_ON_SHADOWED_NAME compiler option is active and the\n warn_on_shadowed_name keyword argument is True, then a warning will be\n emitted if a local name is shadowed by another local name. Note that\n WARN_ON_SHADOWED_NAME implies WARN_ON_SHADOWED_VAR.\n\n If WARN_ON_SHADOWED_VAR compiler option is active and the\n warn_on_shadowed_var keyword argument is True, then a warning will be\n emitted if a named var is shadowed by a local name.", "id": "f1064:c3:m13"} {"signature": "def _with_meta(gen_node):", "body": "@wraps(gen_node)def with_meta(ctx: ParserContext,form: Union[llist.List, lmap.Map, ISeq, lset.Set, vec.Vector],) -> Node:assert not ctx.is_quoted, \"\"descriptor = gen_node(ctx, form)if isinstance(form, IMeta):assert isinstance(form.meta, (lmap.Map, type(None)))form_meta = _clean_meta(form.meta)if form_meta is not None:meta_ast = _parse_ast(ctx, form_meta)assert isinstance(meta_ast, MapNode) or (isinstance(meta_ast, Const) and meta_ast.type == ConstType.MAP)return WithMeta(form=form, meta=meta_ast, expr=descriptor, env=ctx.get_node_env())return descriptorreturn with_meta", "docstring": "Wraps the node generated by gen_node in a :with-meta AST node if the\n original form has meta.\n\n :with-meta AST nodes are used for non-quoted collection literals and for\n function expressions.", "id": "f1064:m5"} {"signature": "def _is_macro(v: Var) -> bool:", "body": "return (Maybe(v.meta).map(lambda m: m.entry(SYM_MACRO_META_KEY, None)) .or_else_get(False))", "docstring": "Return True if the Var holds a macro function.", "id": "f1064:m1"} {"signature": "def compile_module(forms: Iterable[ReaderForm],ctx: CompilerContext,module: types.ModuleType,collect_bytecode: Optional[BytecodeCollector] = None,) -> None:", "body": "_bootstrap_module(ctx.generator_context, ctx.py_ast_optimizer, module)for form in forms:nodes = gen_py_ast(ctx.generator_context, parse_ast(ctx.parser_context, form))_incremental_compile_module(ctx.py_ast_optimizer,nodes,module,source_filename=ctx.filename,collect_bytecode=collect_bytecode,)", "docstring": "Compile an entire Basilisp module into Python bytecode which can be\n executed as a Python module.\n\n This function is designed to generate bytecode which can be used for the\n Basilisp import machinery, to allow callers to import Basilisp modules from\n Python code.", "id": "f1066:m5"} {"signature": "def gen_py_ast(ctx: GeneratorContext, lisp_ast: Node) -> GeneratedPyAST:", "body": "op: NodeOp = lisp_ast.opassert op is not None, \"\"handle_node = _NODE_HANDLERS.get(op)assert (handle_node is not None), f\"\"return handle_node(ctx, lisp_ast)", "docstring": "Take a Lisp AST node as an argument and produce zero or more Python\n AST nodes.\n\n This is the primary entrypoint for generating AST nodes from Lisp\n syntax. 
It may be called recursively to compile child forms.", "id": "f1067:m80"} {"signature": "@_with_ast_locdef _quote_to_py_ast(ctx: GeneratorContext, node: Quote) -> GeneratedPyAST:", "body": "assert node.op == NodeOp.QUOTEreturn _const_node_to_py_ast(ctx, node.expr)", "docstring": "Return a Python AST Node for a `quote` expression.", "id": "f1067:m34"} {"signature": "def __fn_name(s: Optional[str]) -> str:", "body": "return genname(\"\" + munge(Maybe(s).or_else_get(_FN_PREFIX)))", "docstring": "Generate a safe Python function name from a function name symbol.\n If no symbol is provided, generate a name with a default prefix.", "id": "f1067:m19"} {"signature": "@propertydef use_var_indirection(self) -> bool:", "body": "return self._opts.entry(USE_VAR_INDIRECTION, False)", "docstring": "If True, compile all variable references using Var.find indirection.", "id": "f1067:c4:m3"} {"signature": "@_with_ast_locdef _local_sym_to_py_ast(ctx: GeneratorContext, node: Local, is_assigning: bool = False) -> GeneratedPyAST:", "body": "assert node.op == NodeOp.LOCALsym_entry = ctx.symbol_table.find_symbol(sym.symbol(node.name))assert sym_entry is not Noneif node.local == LocalType.FIELD:this_entry = ctx.symbol_table.find_symbol(ctx.current_this)assert this_entry is not None, \"\"return GeneratedPyAST(node=_load_attr(f\"\",ctx=ast.Store() if is_assigning else ast.Load(),))else:return GeneratedPyAST(node=ast.Name(id=sym_entry.munged, ctx=ast.Store() if is_assigning else ast.Load()))", "docstring": "Generate a Python AST node for accessing a locally defined Python variable.", "id": "f1067:m43"} {"signature": "def statementize(e: ast.AST) -> ast.AST:", "body": "if isinstance(e,(ast.Assign,ast.AnnAssign,ast.AugAssign,ast.Expr,ast.Raise,ast.Assert,ast.Pass,ast.Import,ast.ImportFrom,ast.If,ast.For,ast.While,ast.Continue,ast.Break,ast.Try,ast.ExceptHandler,ast.With,ast.FunctionDef,ast.Return,ast.Yield,ast.YieldFrom,ast.Global,ast.ClassDef,ast.AsyncFunctionDef,ast.AsyncFor,ast.AsyncWith,),):return ereturn ast.Expr(value=e)", "docstring": "Transform non-statements into ast.Expr nodes so they can\n stand alone as statements.", "id": "f1067:m10"} {"signature": "@_with_ast_loc_depsdef _if_to_py_ast(ctx: GeneratorContext, node: If) -> GeneratedPyAST:", "body": "assert node.op == NodeOp.IFtest_ast = gen_py_ast(ctx, node.test)result_name = genname(_IF_RESULT_PREFIX)then_ast = __if_body_to_py_ast(ctx, node.then, result_name)else_ast = __if_body_to_py_ast(ctx, node.else_, result_name)test_name = genname(_IF_TEST_PREFIX)test_assign = ast.Assign(targets=[ast.Name(id=test_name, ctx=ast.Store())], value=test_ast.node)ifstmt = ast.If(test=ast.BoolOp(op=ast.Or(),values=[ast.Compare(left=ast.NameConstant(None),ops=[ast.Is()],comparators=[ast.Name(id=test_name, ctx=ast.Load())],),ast.Compare(left=ast.NameConstant(False),ops=[ast.Is()],comparators=[ast.Name(id=test_name, ctx=ast.Load())],),],),values=[],body=list(map(statementize, chain(else_ast.dependencies, [else_ast.node]))),orelse=list(map(statementize, chain(then_ast.dependencies, [then_ast.node]))),)return GeneratedPyAST(node=ast.Name(id=result_name, ctx=ast.Load()),dependencies=list(chain(test_ast.dependencies, [test_assign, ifstmt])),)", "docstring": "Generate an intermediate if statement which assigns to a temporary\n variable, which is returned as the expression value at the end of\n evaluation.\n\n Every expression in Basilisp is true if it is not the literal values nil\n or false. 
This function compiles direct checks for the test value against\n the Python values None and False to accommodate this behavior.\n\n Note that the if and else bodies are switched in compilation so that we\n can perform a short-circuit or comparison, rather than exhaustively checking\n for both false and nil each time.", "id": "f1067:m29"} {"signature": "@_with_ast_loc_depsdef __multi_arity_fn_to_py_ast( ctx: GeneratorContext,node: Fn,methods: Collection[FnMethod],def_name: Optional[str] = None,meta_node: Optional[MetaNode] = None,) -> GeneratedPyAST:", "body": "assert node.op == NodeOp.FNassert all([method.op == NodeOp.FN_METHOD for method in methods])lisp_fn_name = node.local.name if node.local is not None else Nonepy_fn_name = __fn_name(lisp_fn_name) if def_name is None else munge(def_name)py_fn_node = ast.AsyncFunctionDef if node.is_async else ast.FunctionDefarity_to_name = {}rest_arity_name: Optional[str] = Nonefn_defs = []for method in methods:arity_name = f\"\"if method.is_variadic:rest_arity_name = arity_nameelse:arity_to_name[method.fixed_arity] = arity_namewith ctx.new_symbol_table(arity_name), ctx.new_recur_point(method.loop_id, RecurType.FN, is_variadic=node.is_variadic):if lisp_fn_name is not None:ctx.symbol_table.new_symbol(sym.symbol(lisp_fn_name), py_fn_name, LocalType.FN)fn_args, varg, fn_body_ast = __fn_args_to_py_ast(ctx, method.params, method.body)fn_defs.append(py_fn_node(name=arity_name,args=ast.arguments(args=fn_args,kwarg=None,vararg=varg,kwonlyargs=[],defaults=[],kw_defaults=[],),body=fn_body_ast,decorator_list=[_TRAMPOLINE_FN_NAME]if ctx.recur_point.has_recurelse [],returns=None,))dispatch_fn_ast = __multi_arity_dispatch_fn(ctx,py_fn_name,arity_to_name,default_name=rest_arity_name,max_fixed_arity=node.max_fixed_arity,meta_node=meta_node,is_async=node.is_async,)return GeneratedPyAST(node=dispatch_fn_ast.node,dependencies=list(chain(fn_defs, dispatch_fn_ast.dependencies)),)", "docstring": "Return a Python AST node for a function with multiple arities.", "id": "f1067:m26"} {"signature": "@propertydef warn_on_var_indirection(self) -> bool:", "body": "return not self.use_var_indirection and self._opts.entry(WARN_ON_VAR_INDIRECTION, True)", "docstring": "If True, warn when a Var reference cannot be direct linked (iff\n use_var_indirection is False)..", "id": "f1067:c4:m4"} {"signature": "def __var_find_to_py_ast(var_name: str, ns_name: str, py_var_ctx: ast.AST) -> GeneratedPyAST:", "body": "return GeneratedPyAST(node=ast.Attribute(value=ast.Call(func=_FIND_VAR_FN_NAME,args=[ast.Call(func=_NEW_SYM_FN_NAME,args=[ast.Str(var_name)],keywords=[ast.keyword(arg=\"\", value=ast.Str(ns_name))],)],keywords=[],),attr=\"\",ctx=py_var_ctx,))", "docstring": "Generate Var.find calls for the named symbol.", "id": "f1067:m44"} {"signature": "def _is_redefable(v: Var) -> bool:", "body": "return (Maybe(v.meta).map(lambda m: m.get(SYM_REDEF_META_KEY, None)) .or_else_get(False))", "docstring": "Return True if the Var can be redefined.", "id": "f1067:m9"} {"signature": "def _load_attr(name: str, ctx: ast.AST = ast.Load()) -> ast.Attribute:", "body": "attrs = name.split(\"\")def attr_node(node, idx):if idx >= len(attrs):node.ctx = ctxreturn nodereturn attr_node(ast.Attribute(value=node, attr=attrs[idx], ctx=ast.Load()), idx + )return attr_node(ast.Name(id=attrs[], ctx=ast.Load()), )", "docstring": "Generate recursive Python Attribute AST nodes for resolving nested\n names.", "id": "f1067:m1"} {"signature": "def py_module_preamble(ctx: GeneratorContext,) -> GeneratedPyAST:", "body": "preamble: 
List[ast.AST] = []preamble.extend(_module_imports(ctx))preamble.append(_from_module_import())preamble.append(_ns_var())return GeneratedPyAST(node=ast.NameConstant(None), dependencies=preamble)", "docstring": "Bootstrap a new module with imports and other boilerplate.", "id": "f1067:m84"} {"signature": "@_with_ast_locdef _maybe_host_form_to_py_ast(_: GeneratorContext, node: MaybeHostForm) -> GeneratedPyAST:", "body": "assert node.op == NodeOp.MAYBE_HOST_FORMreturn GeneratedPyAST(node=_load_attr(f\"\"))", "docstring": "Generate a Python AST node for accessing a potential Python module\n variable name with a namespace.", "id": "f1067:m49"} {"signature": "def _with_meta_to_py_ast(ctx: GeneratorContext, node: WithMeta, **kwargs) -> GeneratedPyAST:", "body": "assert node.op == NodeOp.WITH_METAhandle_expr = _WITH_META_EXPR_HANDLER.get(node.expr.op)assert (handle_expr is not None), \"\"return handle_expr(ctx, node.expr, meta_node=node.meta, **kwargs)", "docstring": "Generate a Python AST node for Python interop method calls.", "id": "f1067:m57"} {"signature": "@_with_ast_locdef _set_bang_to_py_ast(ctx: GeneratorContext, node: SetBang) -> GeneratedPyAST:", "body": "assert node.op == NodeOp.SET_BANGval_temp_name = genname(\"\")val_ast = gen_py_ast(ctx, node.val)target = node.targetassert isinstance(target, (HostField, Local, VarRef)), f\"\"if isinstance(target, HostField):target_ast = _interop_prop_to_py_ast(ctx, target, is_assigning=True)elif isinstance(target, VarRef):target_ast = _var_sym_to_py_ast(ctx, target, is_assigning=True)elif isinstance(target, Local):target_ast = _local_sym_to_py_ast(ctx, target, is_assigning=True)else: raise GeneratorException(f\"\", lisp_ast=target)return GeneratedPyAST(node=ast.Name(id=val_temp_name, ctx=ast.Load()),dependencies=list(chain(val_ast.dependencies,[ast.Assign(targets=[ast.Name(id=val_temp_name, ctx=ast.Store())],value=val_ast.node,)],target_ast.dependencies,[ast.Assign(targets=[target_ast.node], value=val_ast.node)],)),)", "docstring": "Return a Python AST Node for a `set!` expression.", "id": "f1067:m39"} {"signature": "@_with_ast_loc_depsdef _import_to_py_ast(ctx: GeneratorContext, node: Import) -> GeneratedPyAST:", "body": "assert node.op == NodeOp.IMPORTlast = Nonedeps: List[ast.AST] = []for alias in node.aliases:safe_name = munge(alias.name)try:module = importlib.import_module(safe_name)if alias.alias is not None:ctx.add_import(sym.symbol(alias.name), module, sym.symbol(alias.alias))else:ctx.add_import(sym.symbol(alias.name), module)except ModuleNotFoundError as e:raise ImportError(f\"\", node.form, node) from epy_import_alias = (munge(alias.alias)if alias.alias is not Noneelse safe_name.split(\"\", maxsplit=)[])deps.append(ast.Assign(targets=[ast.Name(id=py_import_alias, ctx=ast.Store())],value=ast.Call(func=_load_attr(\"\"),args=[ast.Str(safe_name)],keywords=[],),))last = ast.Name(id=py_import_alias, ctx=ast.Load())deps.append(ast.Call(func=_load_attr(f\"\"),args=[ast.Call(func=_NEW_SYM_FN_NAME, args=[ast.Str(safe_name)], keywords=[]),last,],keywords=[],))assert last is not None, \"\"return GeneratedPyAST(node=last, dependencies=deps)", "docstring": "Return a Python AST node for a Basilisp `import*` expression.", "id": "f1067:m30"} {"signature": "def map(kvs: Mapping[K, V], meta=None) -> Map[K, V]: ", "body": "return Map(pmap(initial=kvs), meta=meta)", "docstring": "Creates a new map.", "id": "f1072:m0"} {"signature": "def m(**kvs) -> Map[str, V]:", "body": "return Map(pmap(initial=kvs))", "docstring": "Creates a new map from keyword arguments.", 
"id": "f1072:m1"} {"signature": "def _read_meta(ctx: ReaderContext) -> IMeta:", "body": "start = ctx.reader.advance()assert start == \"\"meta = _read_next_consuming_comment(ctx)meta_map: Optional[lmap.Map[LispForm, LispForm]] = Noneif isinstance(meta, symbol.Symbol):meta_map = lmap.map({keyword.keyword(\"\"): meta})elif isinstance(meta, keyword.Keyword):meta_map = lmap.map({meta: True})elif isinstance(meta, lmap.Map):meta_map = metaelse:raise SyntaxError(f\"\")obj_with_meta = _read_next_consuming_comment(ctx)try:return obj_with_meta.with_meta(meta_map) except AttributeError:raise SyntaxError(f\"\")", "docstring": "Read metadata and apply that to the next object in the\n input stream.", "id": "f1073:m19"} {"signature": "def _read_reader_macro(ctx: ReaderContext) -> LispReaderForm:", "body": "start = ctx.reader.advance()assert start == \"\"token = ctx.reader.peek()if token == \"\":return _read_set(ctx)elif token == \"\":return _read_function(ctx)elif token == \"\":ctx.reader.advance()s = _read_sym(ctx)return llist.l(_VAR, s)elif token == '':return _read_regex(ctx)elif token == \"\":ctx.reader.advance()_read_next(ctx) return COMMENTelif ns_name_chars.match(token):s = _read_sym(ctx)assert isinstance(s, symbol.Symbol)v = _read_next_consuming_comment(ctx)if s in ctx.data_readers:f = ctx.data_readers[s]return f(v)else:raise SyntaxError(f\"\")raise SyntaxError(f\"\")", "docstring": "Return a data structure evaluated as a reader\n macro from the input stream.", "id": "f1073:m31"} {"signature": "@_with_locdef _read_map(ctx: ReaderContext) -> lmap.Map:", "body": "reader = ctx.readerstart = reader.advance()assert start == \"\"d: MutableMapping[Any, Any] = {}while True:if reader.peek() == \"\":reader.next_token()breakk = _read_next(ctx)if k is COMMENT:continuewhile True:if reader.peek() == \"\":raise SyntaxError(\"\")v = _read_next(ctx)if v is COMMENT:continueif k in d:raise SyntaxError(f\"\")breakd[k] = vreturn lmap.map(d)", "docstring": "Return a map from the input stream.", "id": "f1073:m14"} {"signature": "def _read_regex(ctx: ReaderContext) -> Pattern:", "body": "s = _read_str(ctx, allow_arbitrary_escapes=True)try:return langutil.regex_from_str(s)except re.error:raise SyntaxError(f\"\")", "docstring": "Read a regex reader macro from the input stream.", "id": "f1073:m30"} {"signature": "def peek(self) -> str:", "body": "return self._buffer[self._idx]", "docstring": "Peek at the next character in the stream.", "id": "f1073:c2:m5"} {"signature": "def _read_namespaced(ctx: ReaderContext, allowed_suffix: Optional[str] = None) -> Tuple[Optional[str], str]:", "body": "ns: List[str] = []name: List[str] = []reader = ctx.readerhas_ns = Falsewhile True:token = reader.peek()if token == \"\":reader.next_token()if has_ns:raise SyntaxError(\"\")elif len(name) == :name.append(\"\")else:if \"\" in name:raise SyntaxError(\"\")has_ns = Truens = namename = []elif ns_name_chars.match(token):reader.next_token()name.append(token)elif allowed_suffix is not None and token == allowed_suffix:reader.next_token()name.append(token)else:breakns_str = None if not has_ns else \"\".join(ns)name_str = \"\".join(name)if ns_str is None:if \"\" in name_str and name_str != \"\":raise SyntaxError(\"\")assert ns_str is None or len(ns_str) > return ns_str, name_str", "docstring": "Read a namespaced token from the input stream.", "id": "f1073:m8"} {"signature": "def _is_unquote(form: ReaderForm) -> bool:", "body": "try:return form.first == _UNQUOTE except AttributeError:return False", "docstring": "Return True if this form is unquote.", 
"id": "f1073:m22"} {"signature": "def _update_loc(self, c):", "body": "if newline_chars.match(c):self._col.append()self._line.append(self._line[-] + )else:self._col.append(self._col[-] + )self._line.append(self._line[-])", "docstring": "Update the internal line and column buffers after a new character\n is added.\n\n The column number is set to 0, so the first character on the next line\n is column number 1.", "id": "f1073:c2:m4"} {"signature": "def _read_coll(ctx: ReaderContext,f: Callable[[Collection[Any]], Union[llist.List, lset.Set, vector.Vector]],end_token: str,coll_name: str,):", "body": "coll: List = []reader = ctx.readerwhile True:token = reader.peek()if token == \"\":raise SyntaxError(f\"\")if whitespace_chars.match(token):reader.advance()continueif token == end_token:reader.next_token()return f(coll)elem = _read_next(ctx)if elem is COMMENT:continuecoll.append(elem)", "docstring": "Read a collection from the input stream and create the\n collection using f.", "id": "f1073:m9"} {"signature": "def symbol(name: str, ns: Optional[str] = None, meta=None) -> Symbol:", "body": "return Symbol(name, ns=ns, meta=meta)", "docstring": "Create a new symbol.", "id": "f1074:m0"} {"signature": "def v(*members: T, meta: Optional[IPersistentMap] = None) -> Vector[T]:", "body": "return Vector(pvector(members), meta=meta)", "docstring": "Creates a new vector from members.", "id": "f1075:m1"} {"signature": "def _dec_print_level(lvl: PrintCountSetting):", "body": "if isinstance(lvl, int):return lvl - return lvl", "docstring": "Decrement the print level if it is numeric.", "id": "f1076:m0"} {"signature": "@abstractmethoddef _lrepr(self, **kwargs) -> str:", "body": "raise NotImplementedError()", "docstring": "Private Lisp representation method. Callers (including object\n internal callers) should not call this method directly, but instead\n should use the module function .lrepr().", "id": "f1076:c0:m2"} {"signature": "def _process_kwargs(**kwargs):", "body": "return dict(kwargs, print_level=_dec_print_level(kwargs[\"\"]))", "docstring": "Process keyword arguments, decreasing the print-level. 
Should be called\n after examining the print level for the current level.", "id": "f1076:m1"} {"signature": "def _lrepr_fallback( o: Any,human_readable: bool = False,print_dup: bool = PRINT_DUP,print_length: PrintCountSetting = PRINT_LENGTH,print_level: PrintCountSetting = PRINT_LEVEL,print_meta: bool = PRINT_META,print_readably: bool = PRINT_READABLY,) -> str: ", "body": "kwargs = {\"\": human_readable,\"\": print_dup,\"\": print_length,\"\": print_level,\"\": print_meta,\"\": print_readably,}if isinstance(o, bool):return _lrepr_bool(o)elif o is None:return _lrepr_nil(o)elif isinstance(o, str):return _lrepr_str(o, human_readable=human_readable, print_readably=print_readably)elif isinstance(o, dict):return _lrepr_py_dict(o, **kwargs)elif isinstance(o, list):return _lrepr_py_list(o, **kwargs)elif isinstance(o, set):return _lrepr_py_set(o, **kwargs)elif isinstance(o, tuple):return _lrepr_py_tuple(o, **kwargs)elif isinstance(o, complex):return _lrepr_complex(o)elif isinstance(o, datetime.datetime):return _lrepr_datetime(o)elif isinstance(o, Decimal):return _lrepr_decimal(o, print_dup=print_dup)elif isinstance(o, Fraction):return _lrepr_fraction(o)elif isinstance(o, Pattern):return _lrepr_pattern(o)elif isinstance(o, uuid.UUID):return _lrepr_uuid(o)else:return repr(o)", "docstring": "Fallback function for lrepr for subclasses of standard types.\n\n The singledispatch used for standard lrepr dispatches using an exact\n type match on the first argument, so we will only hit this function\n for subclasses of common Python types like strings or lists.", "id": "f1076:m5"} {"signature": "def s(*members: T, meta=None) -> Set[T]:", "body": "return Set(pset(members), meta=meta)", "docstring": "Creates a new set from members.", "id": "f1079:m1"} {"signature": "def eval_stream(stream, ctx: compiler.CompilerContext, module: types.ModuleType):", "body": "last = Nonefor form in reader.read(stream, resolver=runtime.resolve_alias):last = compiler.compile_and_exec_form(form, ctx, module)return last", "docstring": "Evaluate the forms in stdin into a Python module AST node.", "id": "f1080:m2"} {"signature": "def eval_str(s: str, ctx: compiler.CompilerContext, module: types.ModuleType, eof: Any):", "body": "last = eoffor form in reader.read_str(s, resolver=runtime.resolve_alias, eof=eof):last = compiler.compile_and_exec_form(form, ctx, module)return last", "docstring": "Evaluate the forms in a string into a Python module AST node.", "id": "f1080:m3"} {"signature": "def init():", "body": "runtime.init_ns_var()runtime.bootstrap()importer.hook_imports()importlib.import_module(\"\")", "docstring": "Initialize the runtime environment for evaluation.", "id": "f1082:m0"} {"signature": "def collect(self):", "body": "_reset_collected_tests()filename = self.fspath.basenameself.fspath.pyimport()ns = _current_ns()tests = _collected_tests()assert tests is not None, \"\"for test in tests:f: TestFunction = test.valueyield BasilispTestItem(test.name.name, self, f, ns, filename)", "docstring": "Collect all of the tests in the namespace (module) given.\n\n Basilisp's test runner imports the namespace which will (as a side\n effect) collect all of the test functions in a namespace (represented\n by `deftest` forms in Basilisp) into an atom in `basilisp.test`.\n BasilispFile.collect fetches those test functions and generates\n BasilispTestItems for PyTest to run the tests.", "id": "f1083:c1:m0"} {"signature": "def _current_ns() -> str:", "body": "var = Maybe(runtime.Var.find(_CURRENT_NS_SYM)).or_else_raise(lambda: 
runtime.RuntimeException(f\"\"))ns = var.value.deref()return ns.name", "docstring": "Fetch the current namespace from basilisp.test/current-ns.", "id": "f1083:m2"} {"signature": "def repr_failure(self, excinfo):", "body": "if isinstance(excinfo.value, TestFailuresInfo):exc = excinfo.valuefailures = exc.data.entry(_FAILURES_KW)messages = []for details in failures:type_ = details.entry(_TYPE_KW)if type_ == _FAILURE_KW:messages.append(self._failure_msg(details))elif type_ == _ERROR_KW:exc = details.entry(_ACTUAL_KW)line = details.entry(_LINE_KW)messages.append(self._error_msg(exc, line=line))else:assert False, \"\"return \"\".join(messages)elif isinstance(excinfo.value, Exception):return self._error_msg(excinfo.value)else:return None", "docstring": "Representation function called when self.runtest() raises an\n exception.", "id": "f1083:c2:m2"} {"signature": "@staticmethoddef status(s):", "body": "print(\"\".format(s))", "docstring": "Prints things in bold.", "id": "f1086:c0:m0"} {"signature": "def get_interaction_energy(ampal_objs, ff=None, assign_ff=True):", "body": "if ff is None:ff = FORCE_FIELDS['']if assign_ff:for ampal_obj in ampal_objs:assign_force_field(ampal_obj, ff)interactions = find_inter_ampal(ampal_objs, ff.distance_cutoff)buff_score = score_interactions(interactions, ff)return buff_score", "docstring": "Calculates the interaction energy between AMPAL objects.\n\n Parameters\n ----------\n ampal_objs: [AMPAL Object]\n A list of any AMPAL objects with `get_atoms` methods.\n ff: BuffForceField, optional\n The force field to be used for scoring. If no force field is\n provided then the most current version of the BUDE force field\n will be used.\n assign_ff: bool, optional\n If true, then force field assignment on the AMPAL object will be\n will be updated.\n\n Returns\n -------\n BUFF_score: BUFFScore\n A BUFFScore object with information about each of the interactions and\n the atoms involved.", "id": "f1089:m0"} {"signature": "def find_max_rad_npnp(self):", "body": "max_rad = max_npnp = for res, _ in self.items():if res != '':for _, ff_params in self[res].items():if max_rad < ff_params[]:max_rad = ff_params[]if max_npnp < ff_params[]:max_npnp = ff_params[]return max_rad, max_npnp", "docstring": "Finds the maximum radius and npnp in the force field.\n\n Returns\n -------\n (max_rad, max_npnp): (float, float)\n Maximum radius and npnp distance in the loaded force field.", "id": "f1090:c2:m6"} {"signature": "def assign_force_field(ampal_obj, ff):", "body": "if hasattr(ampal_obj, ''):atoms = ampal_obj.get_atoms(ligands=True, inc_alt_states=True)else:atoms = ampal_obj.get_atoms(inc_alt_states=True)for atom in atoms:w_str = Nonea_ff_id = Noneif atom.element == '':continueelif atom.parent.mol_code.upper() in ff:if atom.res_label.upper() in ff[atom.parent.mol_code]:a_ff_id = (atom.parent.mol_code.upper(),atom.res_label.upper())elif atom.res_label.upper() in ff['']:a_ff_id = ('', atom.res_label.upper())else:w_str = ('''''').format(atom.res_label, atom.parent.mol_code)elif atom.res_label.upper() in ff['']:a_ff_id = ('', atom.res_label.upper())else:w_str = ('''').format(atom.res_label, atom.parent.mol_code)if w_str:warnings.warn(w_str, NotParameterisedWarning)atom.tags[''] = a_ff_idreturn", "docstring": "Assigns force field parameters to Atoms in the AMPAL object.\n\n Parameters\n ----------\n ampal_obj : AMPAL Object\n Any AMPAL object with a `get_atoms` method.\n ff: BuffForceField\n The force field to be used for scoring.", "id": "f1090:m0"} {"signature": "def get_method_owner(meth):", 
"body": "if inspect.ismethod(meth):if sys.version_info < (,):return meth.im_class if meth.im_self is None else meth.im_selfelse:return meth.__self__", "docstring": "Returns the instance owning the supplied instancemethod or\nthe class owning the supplied classmethod.", "id": "f1093:m2"} {"signature": "def named_objs(objlist):", "body": "objs = []for k, obj in objlist:if hasattr(k, ''):k = k.__name__else:k = as_unicode(k)objs.append((k, obj))return objs", "docstring": "Given a list of objects, returns a dictionary mapping from\nstring name for the object to the object itself.", "id": "f1093:m1"} {"signature": "def notebook_show(obj, doc, comm):", "body": "target = obj.ref['']load_mime = ''exec_mime = ''bokeh_script, bokeh_div, _ = bokeh.embed.notebook.notebook_content(obj, comm.id)publish_display_data(data={'': encode_utf8(bokeh_div)})JS = ''.join([PYVIZ_PROXY, JupyterCommManager.js_manager])publish_display_data(data={load_mime: JS, '': JS})msg_handler = bokeh_msg_handler.format(plot_id=target)comm_js = comm.js_template.format(plot_id=target, comm_id=comm.id, msg_handler=msg_handler)bokeh_js = ''.join([comm_js, bokeh_script])publish_display_data(data={exec_mime: '', '': '','': bokeh_js},metadata={exec_mime: {'': target}})", "docstring": "Displays bokeh output inside a notebook.", "id": "f1096:m0"} {"signature": "def copy(self, obj_id, folder_id, move=False):", "body": "if folder_id.startswith(''):log.info( '''' )folder_id = self.info(folder_id)['']return super(OneDriveAPI, self).copy(obj_id, folder_id, move=move)", "docstring": "Copy specified file (object) to a folder.\n Note that folders cannot be copied, this is an API limitation.", "id": "f1114:c11:m3"} {"signature": "def put( self, path_or_tuple, folder_id='',overwrite=None, downsize=None, bits_api_fallback=True ):", "body": "api_overwrite = self._translate_api_flag(overwrite, '', [''])api_downsize = self._translate_api_flag(downsize, '')name, src = self._process_upload_source(path_or_tuple)if not isinstance(bits_api_fallback, (int, float, long)):bits_api_fallback = bool(bits_api_fallback)if bits_api_fallback is not False:if bits_api_fallback is True: bits_api_fallback = self.api_put_max_bytessrc.seek(, os.SEEK_END)if src.tell() >= bits_api_fallback:if bits_api_fallback > : log.info('',*((float(v) / **) for v in [src.tell(), bits_api_fallback]) )if overwrite is not None and api_overwrite != '':raise NoAPISupportError( ''''.format(overwrite) )if downsize is not None:log.info( '''', downsize )file_id = self.put_bits(path_or_tuple, folder_id=folder_id) return self.info(file_id)return self( self._api_url_join(folder_id, '', name),dict(overwrite=api_overwrite, downsize_photo_uploads=api_downsize),data=src, method='', auth_header=True )", "docstring": "Upload a file (object), possibly overwriting (default behavior)\n a file with the same \"name\" attribute, if it exists.\n\n First argument can be either path to a local file or tuple\n of \"(name, file)\", where \"file\" can be either a file-like object\n or just a string of bytes.\n\n overwrite option can be set to False to allow two identically-named\n files or \"ChooseNewName\" to let OneDrive derive some similar\n unique name. Behavior of this option mimics underlying API.\n\n downsize is a true/false API flag, similar to overwrite.\n\n bits_api_fallback can be either True/False or an integer (number of\n bytes), and determines whether method will fall back to using BITS API\n (as implemented by \"put_bits\" method) for large files. 
Default \"True\"\n (bool) value will use non-BITS file size limit (api_put_max_bytes, ~100 MiB)\n as a fallback threshold, passing False will force using single-request uploads.", "id": "f1114:c10:m11"} {"signature": "def get_quota(self):", "body": "return op.itemgetter('', '')(super(OneDriveAPI, self).get_quota())", "docstring": "Return tuple of (bytes_available, bytes_quota).", "id": "f1114:c11:m1"} {"signature": "def auth_user_process_url(self, url):", "body": "url = urlparse.urlparse(url)url_qs = dict(it.chain.from_iterable(urlparse.parse_qsl(v) for v in [url.query, url.fragment] ))if url_qs.get(''):raise APIAuthError(''.format(url_qs[''], url_qs.get('')) )self.auth_code = url_qs['']return self.auth_code", "docstring": "Process tokens and errors from redirect_uri.", "id": "f1114:c9:m2"} {"signature": "def info(self, obj_id=''):", "body": "return self(obj_id)", "docstring": "Return metadata of a specified object.\n See http://msdn.microsoft.com/en-us/library/live/hh243648.aspx\n for the list and description of metadata keys for each object type.", "id": "f1114:c10:m9"} {"signature": "def info_update(self, obj_id, data):", "body": "return self(obj_id, method='', data=data, auth_header=True)", "docstring": "Update metadata with of a specified object.\n See http://msdn.microsoft.com/en-us/library/live/hh243648.aspx\n for the list of RW keys for each object type.", "id": "f1114:c10:m15"} {"signature": "@classmethoddef from_conf(cls, path=None, **overrides):", "body": "from onedrive import portalockerimport yamlif path is None:path = cls.conf_path_defaultlog.debug('', path)path = os.path.expanduser(path)with open(path, '') as src:portalocker.lock(src, portalocker.LOCK_SH)yaml_str = src.read()portalocker.unlock(src)conf = yaml.safe_load(yaml_str)conf.setdefault('', path)conf_cls = dict()for ns, keys in cls.conf_update_keys.viewitems():for k in keys:try:v = conf.get(ns, dict()).get(k)except AttributeError:if not cls.conf_raise_structure_errors: raiseraise KeyError(('''''' ).format(ns=ns, k=k, path=path))if v is not None: conf_cls[''.format(ns, k)] = conf[ns][k]conf_cls.update(overrides)if isinstance(conf.get('', dict()).get(''), (int, long)):log.warn( '''''', path )cid = conf['']['']if not re.search(r''.format(cid), yaml_str)and re.search(r''.format(cid), yaml_str):cid = int(''.format(cid))conf[''][''] = ''.format(cid)self = cls(**conf_cls)self.conf_save = conf['']return self", "docstring": "Initialize instance from YAML configuration file,\n writing updates (only to keys, specified by \"conf_update_keys\") back to it.", "id": "f1117:c0:m1"} {"signature": "def update_session_headers(session):", "body": "session.headers.update({'': '','': ''})return session", "docstring": "Add content type header to the session.\n\n:param: requests.sessions.Session session: A session.\n:return: A session with modified headers.\n:rtype: requests.sessions.Session", "id": "f1126:m2"} {"signature": "def set_delivery_system(self, store, postcode, fulfilment_method=FULFILMENT_METHOD.DELIVERY):", "body": "method = '' if fulfilment_method == FULFILMENT_METHOD.DELIVERY else ''params = {'': method,'': postcode,'': store.store_id}return self.__post('', json=params)", "docstring": "Set local cookies by initialising the delivery system on the remote.\nRequires a store ID and a delivery postcode.\n\n:param Store store: Store id.\n:param string postcode: A postcode.\n:return: A response having initialised the delivery system.\n:rtype: requests.Response", "id": "f1129:c0:m5"} {"signature": "def get_nearest_store(self, 
postcode):", "body": "return self.get_stores(postcode).local_store", "docstring": "Search for domino pizza stores using a postcode. This will only search\nfor local stores indicating delivery status and payment details.\n\n:param string postcode: A postcode.\n:return: A response containing stores matching the postcode.\n:rtype: requests.Response", "id": "f1129:c0:m4"} {"signature": "def get_basket(self):", "body": "response = self.__get('')return Basket(response.json())", "docstring": "Retrieve the basket for the current session.\n\n:return: A response containing the basket for the current session.\n:rtype: requests.Response", "id": "f1129:c0:m7"} {"signature": "def add_pizza_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=):", "body": "item_variant = item[variant]ingredients = item_variant[''].update([, ])params = {'': ,'': quantity,'': variant,'': item.item_id,'': ingredients,'': ,'': [],'': }return self.__post('', json=params)", "docstring": "Add a pizza to the current basket.\n\n:param Item item: Item from menu.\n:param int variant: Item SKU id. Some defaults are defined in the VARIANT enum.\n:param int quantity: The quantity of pizza to be added.\n:return: A response having added a pizza to the current basket.\n:rtype: requests.Response", "id": "f1129:c0:m9"} {"signature": "def readme():", "body": "with open('') as infile:return infile.read()", "docstring": "Read README file", "id": "f1130:m0"} {"signature": "def seek(self, offset, whence=):", "body": "if self.closed:raise ValueError('')if whence == SEEK_SET:pos = offsetelif whence == SEEK_CUR:pos = self.pos + offsetelif whence == SEEK_END:pos = self.size + offsetif not <= pos <= self.size:raise ValueError(''.format(pos, self.size))self.pos = posreturn self.pos", "docstring": "Same as `file.seek()` but for the slice. Returns a value between\n`self.start` and `self.size` inclusive.\n\nraises:\n ValueError if the new seek position is not between 0 and\n `self.size`.", "id": "f1132:c1:m3"} {"signature": "@propertydef end(self):", "body": "return self.start + self.size - ", "docstring": "The end position of the slice. The slice will not read or write if the\nnew position is ahead of this value.", "id": "f1132:c1:m1"} {"signature": "def __exit__(self, exc_type, exc_value, exc_tb):", "body": "if self._debug >= and exc_type is not None:traceback.print_exception(exc_type, exc_value, exc_tb,file=sys.stderr)sys.exit('' %(self.method, self.ext_cls.__module__,self.ext_cls.__name__))return Falseself.ext_cls = Nonereturn exc_type and issubclass(exc_type, Exception)", "docstring": "Called upon exit from the context manager. Implements the\ndebugging output.\n\n:param exc_type: The exception type. If no exception was\n generated, will be ``None``.\n:param exc_value: The exception value. If no exception was\n generated, will be ``None``.\n:param exc_tb: The exception traceback. If no exception was\n generated, will be ``None``.\n\n:returns: A ``True`` value if any exception should be ignored,\n ``False`` otherwise. 
Will return ``True`` for all\n ``Exception`` subclasses unless debugging is\n enabled.", "id": "f1144:c1:m2"} {"signature": "@classmethoddef _get_extension_classes(cls):", "body": "if cls._extension_classes is None:exts = {}for ext in entry.points[NAMESPACE_EXTENSIONS]:exts.setdefault(ext.priority, [])exts[ext.priority].append(ext)cls._extension_classes = list(utils.iter_prio_dict(exts))return cls._extension_classes", "docstring": "Retrieve the extension classes in priority order.\n\n:returns: A list of extension classes, in proper priority\n order.", "id": "f1144:c2:m0"} {"signature": "def post_step(self, ctxt, step, idx, result):", "body": "debugger = ExtensionDebugger('')for ext in self.exts:with debugger(ext):ext.post_step(ctxt, step, idx, result)return result", "docstring": "Called after executing a step.\n\n:param ctxt: An instance of ``timid.context.Context``.\n:param step: An instance of ``timid.steps.Step`` describing\n the step that was executed.\n:param idx: The index of the step in the list of steps.\n:param result: An instance of ``timid.steps.StepResult``\n describing the result of executing the step.\n May be altered by the extension, e.g., to set\n the ``ignore`` attribute.\n\n:returns: The ``result`` parameter, for convenience.", "id": "f1144:c2:m6"} {"signature": "def read_steps(self, ctxt, steps):", "body": "pass", "docstring": "Called after reading steps, prior to adding them to the list of\ntest steps. This allows an extension to alter the list (in\nplace).\n\n:param ctxt: An instance of ``timid.context.Context``.\n:param steps: A list of ``timid.steps.Step`` instances.", "id": "f1144:c0:m2"} {"signature": "def __init__(self, method):", "body": "self.method = methodself.ext_cls = Nonedebug = os.environ.get('')if debug is None:self._debug = else:try:self._debug = max(, int(debug))except ValueError:self._debug = self.debug(, '' % self.method)", "docstring": "Initialize an ``ExtensionDebugger`` instance.\n\n:param where: A string indicating the name of the extension\n method that will be called.", "id": "f1144:c1:m0"} {"signature": "@abc.abstractpropertydef priority(self):", "body": "pass", "docstring": "An integer value. This provides a primitive means of controlling\nthe order in which extensions will be called. The higher the\nnumber, the later the modifier will be applied.\n\nThis must be a class attribute, not a property.", "id": "f1144:c0:m6"} {"signature": "def pre_step(self, ctxt, step, idx):", "body": "return None", "docstring": "Called prior to executing a step.\n\n:param ctxt: An instance of ``timid.context.Context``.\n:param step: An instance of ``timid.steps.Step`` describing\n the step to be executed.\n:param idx: The index of the step in the list of steps.\n\n:returns: A ``True`` value if the step is to be skipped. Any\n ``False`` value (including ``None``) will result in\n the step being executed as normal.", "id": "f1144:c0:m3"} {"signature": "def read_steps(self, ctxt, steps):", "body": "debugger = ExtensionDebugger('')for ext in self.exts:with debugger(ext):ext.read_steps(ctxt, steps)return steps", "docstring": "Called after reading steps, prior to adding them to the list of\ntest steps. 
Extensions are able to alter the list (in place).\n\n:param ctxt: An instance of ``timid.context.Context``.\n:param steps: A list of ``timid.steps.Step`` instances.\n\n:returns: The ``steps`` parameter, for convenience.", "id": "f1144:c2:m4"} {"signature": "def __init__(self, step_addr, action, modifiers=None, name=None,description=None):", "body": "self.step_addr = step_addrself.action = actionself.modifiers = modifiers or []self.name = name or action.__class__.__name__self.description = description", "docstring": "Initialize a ``Step`` instance.\n\n:param step_addr: The address of the step in the test\n configuration.\n:param action: An ``Action`` instance.\n:param modifiers: A list of ``Modifier`` instances, in the\n order in which processing should be\n performed. Optional.\n:param name: A name for the step. Optional.\n:param description: A description of the step. Optional.", "id": "f1145:c6:m2"} {"signature": "def action_conf(self, ctxt, action_class, action_name, config, step_addr):", "body": "return config", "docstring": "A modifier hook function. This is called in priority order prior\nto initializing the ``Action`` for the step. This allows a\nmodifier to alter the configuration to be fed to the action.\n\n:param ctxt: The context object.\n:param action_class: The ``Action`` subclass the modifier is\n modifying. This method should not\n attempt to initialize the action.\n:param action_name: The name of the action.\n:param config: The configuration for the action. This may be\n a scalar value (e.g., \"run: command\"), a list,\n or a dictionary. If the configuration provided\n is invalid for the action, a ``ConfigError``\n should be raised.\n:param step_addr: The address of the step in the test\n configuration. Should be passed to the\n ``ConfigError``.\n\n:returns: The configuration for the action, optionally\n modified. If the configuration is not modified,\n ``config`` must be returned unchanged. The default\n implementation of this method does so.", "id": "f1145:c4:m0"} {"signature": "@propertydef ignore(self):", "body": "return self._ignore or False", "docstring": "Determine whether an error result should be ignored.", "id": "f1145:c7:m2"} {"signature": "def post_call(self, ctxt, result, action, post_mod, pre_mod):", "body": "return result", "docstring": "A modifier hook function. This is called in reverse-priority\norder after invoking the ``Action`` for the step. This allows\na modifier to inspect or alter the result of the step.\n\n:param ctxt: The context object.\n:param result: The result of the action. This will be a\n ``StepResult`` object.\n:param action: The action that was performed.\n:param post_mod: A list of modifiers following this modifier\n in the list of modifiers that is applicable\n to the action. This list is in priority\n order.\n:param pre_mod: A list of modifiers preceding this modifier in\n the list of modifiers that is applicable to\n the action. This list is in priority order.\n\n:returns: The result for the action, optionally modified. If\n the result is not modified, ``result`` must be\n returned unchanged. The default implementation of\n this method does so.", "id": "f1145:c4:m2"} {"signature": "@abc.abstractpropertydef priority(self):", "body": "pass", "docstring": "An integer value. This provides a primitive means of controlling\nthe order of application of modifiers. The higher the number,\nthe later the modifier will be applied. 
The action has a\npriority equivalent to positive infinity.\n\nThis must be a class attribute, not a property.", "id": "f1145:c4:m3"} {"signature": "def __init__(self, fname, idx, key=None):", "body": "self.fname = fnameself.idx = idxself.key = keyself._str = None", "docstring": "Initialize a ``StepAddress`` instance.\n\n:param fname: The file name of the YAML file.\n:param idx: The index of the step within the file, or within\n the designated key of the file.\n:param key: A key within the file. If the file consists of a\n single list, this should be ``None`` (the\n default).", "id": "f1145:c1:m0"} {"signature": "def __init__(self, ctxt, name, config, step_addr):", "body": "self.validate_conf(name, config, step_addr)self.name = nameself.config = configself.step_addr = step_addr", "docstring": "Initialize the action or modifier. This should process and store\nthe configuration provided.\n\n:param ctxt: The context object.\n:param name: The name.\n:param config: The configuration. This may be a scalar value\n (e.g., \"run: command\"), a list, or a\n dictionary. If the configuration provided is\n invalid, a ``ConfigError`` should be raised.\n:param step_addr: The address of the step in the test\n configuration. Should be passed to the\n ``ConfigError``.", "id": "f1145:c2:m0"} {"signature": "def __init__(self, ctxt, name, config, step_addr):", "body": "super(SensitiveDictAction, self).__init__(ctxt, name, config, step_addr)self.set_vars = {}for key, value in config.get('', {}).items():self.set_vars[key] = ctxt.template(value)self.unset_vars = set(config.get('', []))self.sensitive_vars = set(config.get('', []))self.files = [ctxt.template(fn) for fn in config.get('', [])]self.dirname = os.path.dirname(step_addr.fname) or os.curdir", "docstring": "Initialize a ``SensitiveDictAction`` instance.\n\n:param ctxt: The context object.\n:param name: The name of the action.\n:param config: The configuration for the action. This may be\n a scalar value (e.g., \"run: command\"), a list,\n or a dictionary. If the configuration provided\n is invalid for the action, a ``ConfigError``\n should be raised.\n:param step_addr: The address of the step in the test\n configuration. Should be passed to the\n ``ConfigError``.", "id": "f1145:c8:m0"} {"signature": "def init(self, ctxt, step_addr):", "body": "return self.cls(ctxt, self.name, self.conf, step_addr)", "docstring": "Initialize the item. 
This calls the class constructor with the\nappropriate arguments and returns the initialized object.\n\n:param ctxt: The context object.\n:param step_addr: The address of the step in the test\n configuration.", "id": "f1145:c5:m1"} {"signature": "def __init__(self):", "body": "self._namespaces = {}", "docstring": "Initialize an ``EntrypointCache``.", "id": "f1146:c1:m0"} {"signature": "def __init__(self, namespace):", "body": "self.namespace = namespaceself._entrypoints = {}for ep in pkg_resources.iter_entry_points(namespace):self._entrypoints.setdefault(ep.name, [])self._entrypoints[ep.name].append(ep)self._epcache = {}self._eplist = None", "docstring": "Initialize a ``NamespaceCache``.\n\n:param namespace: The namespace to cache.", "id": "f1146:c0:m0"} {"signature": "def __getitem__(self, name):", "body": "if (name not in self._entrypoints orself._epcache.get(name) is _unavailable):raise KeyError(name)if name not in self._epcache:error = Nonefor ep in self._entrypoints[name]:try:self._epcache[name] = ep.load()except (ImportError, AttributeError,pkg_resources.UnknownExtra):if error is None:error = sys.exc_info()else:breakelse:self._epcache[name] = _unavailableif error is None:raise KeyError(name)six.reraise(*error)return self._epcache[name]", "docstring": "Retrieve the designated entrypoint object.\n\n:param name: The name of the entrypoint.\n\n:returns: The loaded object referenced by the entrypoint.", "id": "f1146:c0:m3"} {"signature": "def __init__(self, verbose=, debug=False, cwd=None):", "body": "self.verbose = verboseself.debug = debugself.variables = utils.SensitiveDict()self.environment = environment.Environment(cwd=cwd)self.steps = []self._jinja = jinja2.Environment()self._jinja.globals[''] = self.environment", "docstring": "Initialize a new ``Context`` instance.", "id": "f1147:c0:m0"} {"signature": "def emit(self, msg, level=, debug=False):", "body": "if debug:if not self.debug:returnstream = sys.stderrelse:if self.verbose < level:returnstream = sys.stdoutprint(msg, file=stream)stream.flush()", "docstring": "Emit a message to the user.\n\n:param msg: The message to emit. If ``debug`` is ``True``,\n the message will be emitted to ``stderr`` only if\n the ``debug`` attribute is ``True``. If ``debug``\n is ``False``, the message will be emitted to\n ``stdout`` under the control of the ``verbose``\n attribute.\n:param level: Ignored if ``debug`` is ``True``. The message\n will only be emitted if the ``verbose``\n attribute is greater than or equal to the value\n of this parameter. Defaults to 1.\n:param debug: If ``True``, marks the message as a debugging\n message. The message will only be emitted if\n the ``debug`` attribute is ``True``.", "id": "f1147:c0:m1"} {"signature": "def expression(self, string):", "body": "if not isinstance(string, six.string_types):return lambda ctxt: stringexpr = self._jinja.compile_expression(string)return lambda ctxt: expr(ctxt.variables)", "docstring": "Interpret an expression string. This returns a callable taking\none argument--this context--and returning the result of\nevaluating the expression.\n\n:param string: The expression.\n\n:returns: A callable of one argument that will return the\n desired expression.", "id": "f1147:c0:m3"} {"signature": "def post_call(self, ctxt, result, action, post_mod, pre_mod):", "body": "result.ignore = self.configreturn result", "docstring": "A modifier hook function. This is called in reverse-priority\norder after invoking the ``Action`` for the step. 
This allows\na modifier to inspect or alter the result of the step.\n\n:param ctxt: The context object.\n:param result: The result of the action. This will be a\n ``StepResult`` object.\n:param action: The action that was performed.\n:param post_mod: A list of modifiers following this modifier\n in the list of modifiers that is applicable\n to the action. This list is in priority\n order.\n:param pre_mod: A list of modifiers preceding this modifier in\n the list of modifiers that is applicable to\n the action. This list is in priority order.\n\n:returns: The result for the action, optionally modified. If\n the result is not modified, ``result`` must be\n returned unchanged. This implementation alters the\n ``ignore`` property of the ``result`` object to\n match the configured value.", "id": "f1148:c1:m0"} {"signature": "def __init__(self, option_strings, dest, **kwargs):", "body": "allow_type = kwargs.pop('', False)super(DictAction, self).__init__(option_strings, dest, **kwargs)self.allow_type = allow_type", "docstring": "Initialize a ``DictAction`` object.\n\n:param option_strings: A list of option strings.\n:param dest: The target attribute to store the option values\n in.\n:param allow_type: A boolean indicating whether the value type\n may be designated. If ``False``, the value\n type is always treated as a string.", "id": "f1149:c0:m0"} {"signature": "def add(self, item):", "body": "self._value.add(item)self._rebuild()", "docstring": "Add a new item to the ``SetVariable`` instance.\n\n:param item: The item to add.", "id": "f1150:c2:m2"} {"signature": "@abc.abstractpropertydef _type(self):", "body": "pass", "docstring": "The abstract type the value should be represented as, e.g.,\n``collections.Set`` or ``collections.Sequence``.", "id": "f1150:c0:m6"} {"signature": "def declare_set(self, name, sep=os.pathsep):", "body": "self._declare_special(name, sep, SetVariable)", "docstring": "Declare an environment variable as a set-like special variable.\nThis can be used even if the environment variable is not\npresent.\n\n:param name: The name of the environment variable that should\n be considered set-like.\n:param sep: The separator to be used. Defaults to the value\n of ``os.pathsep``.", "id": "f1150:c3:m7"} {"signature": "def _update(self, value):", "body": "if not value:value = self._coerce()elif isinstance(value, six.string_types):value = self._coerce(value.split(self._sep))else:value = self._coerce(value)self._value = value", "docstring": "Alert method used when the variable's value in an ``Environment``\ninstance has been altered.\n\n:param value: The new value to set the value to.", "id": "f1150:c0:m5"} {"signature": "def __getitem__(self, name):", "body": "if name not in self._data:raise KeyError(name)if name in self._special:return self._special[name]return self._data[name]", "docstring": "Retrieve the value of a variable from the environment. 
If the\nvariable has been declared as a list-like variable, the return\nvalue will be a list of strings; otherwise, it will be a\nstring.\n\n:param name: The name of the variable to retrieve.\n\n:returns: The value of the designated variable.", "id": "f1150:c3:m1"} {"signature": "def __str__(self):", "body": "return self._env._data.get(self._var) or ''", "docstring": "Obtain a string form of the ``EnvListVariable`` instance.\n\n:returns: The string form of the variable.", "id": "f1150:c0:m1"} {"signature": "def __len__(self):", "body": "return len(self._value)", "docstring": "Obtain the length of an ``EnvListVariable`` instance.\n\n:returns: The number of elements in the variable.", "id": "f1150:c0:m3"} {"signature": "def __setitem__(self, idx, value):", "body": "self._value[idx] = valueself._rebuild()", "docstring": "Set an item on the ``ListVariable`` instance.\n\n:param idx: The index or index-like object (e.g., slice) to\n set.\n:param value: The value to set it to.", "id": "f1150:c1:m1"} {"signature": "def __getitem__(self, idx):", "body": "return self._value[idx]", "docstring": "Retrieve an item from a ``ListVariable`` instance.\n\n:param idx: The index or index-like object (e.g., slice) to\n retrieve.\n\n:returns: The value at the designated index.", "id": "f1150:c1:m0"} {"signature": "def __delitem__(self, idx):", "body": "del self._value[idx]self._rebuild()", "docstring": "Delete an item from the ``ListVariable`` instance.\n\n:param idx: The index or index-like object (e.g., slice) to\n delete.", "id": "f1150:c1:m2"} {"signature": "def __len__(self):", "body": "return len(self._parent)", "docstring": "Obtain the length of a ``MaskedDict`` instance.\n\n:returns: The number of keys in the dictionary.", "id": "f1152:c1:m4"} {"signature": "@propertydef sensitive(self):", "body": "return frozenset(self._sensitive)", "docstring": "Retrieve a set of the \"sensitive\" keys, keys for which the data\nshould be treated as sensitive. This will be a copy;\nmodifications will not affect the set.", "id": "f1152:c0:m9"} {"signature": "def copy(self):", "body": "return self.__class__(self._data.copy(), self._sensitive.copy())", "docstring": "Retrieve a copy of the sensitive dictionary. Note that this is a\nshallow copy.", "id": "f1152:c0:m7"} {"signature": "def schema_validate(instance, schema, exc_class, *prefix, **kwargs):", "body": "try:jsonschema.validate(instance, schema)except jsonschema.ValidationError as exc:path = ''.join((a if isinstance(a, six.string_types) else '' % a)for a in itertools.chain(prefix, exc.path))message = '' % (path, exc.message)raise exc_class(message, **kwargs)", "docstring": "Schema validation helper. Performs JSONSchema validation. 
If a\nschema validation error is encountered, an exception of the\ndesignated class is raised with the validation error message\nappropriately simplified and passed as the sole positional\nargument.\n\n:param instance: The object to schema validate.\n:param schema: The schema to use for validation.\n:param exc_class: The exception class to raise instead of the\n ``jsonschema.ValidationError`` exception.\n:param prefix: Positional arguments are interpreted as a list of\n keys to prefix to the path contained in the\n validation error.\n:param kwargs: Keyword arguments to pass to the exception\n constructor.", "id": "f1152:m1"} {"signature": "def canonicalize_path(cwd, path):", "body": "if not os.path.isabs(path):path = os.path.join(cwd, path)return os.path.abspath(path)", "docstring": "Canonicalizes a path relative to a given working directory. That\nis, the path, if not absolute, is interpreted relative to the\nworking directory, then converted to absolute form.\n\n:param cwd: The working directory.\n:param path: The path to canonicalize.\n\n:returns: The absolute path.", "id": "f1152:m0"} {"signature": "@propertydef sensitive(self):", "body": "return self._parent.sensitive", "docstring": "Retrieve a set of the \"sensitive\" keys, keys for which the data\nshould be treated as sensitive. This will be a copy;\nmodifications will not affect the set.", "id": "f1152:c1:m7"} {"signature": "def __str__(self):", "body": "return six.text_type(dict(self))", "docstring": "Obtain a string form of a ``MaskedDict`` instance.\n\n:returns: The string form of the ``MaskedDict`` instance.", "id": "f1152:c1:m3"} {"signature": "def assert_connected(self, conn, client):", "body": "self.assertIsInstance(conn.client_protocol, DummyClientProtocol)self.assertEqual(conn.client_protocol.side, \"\")self.assertEqual(conn.client_protocol.connected, True)self.assertEqual(conn.client_protocol.disconnected_reason, None)self.assertIsInstance(conn.server_protocol, DummyServerProtocol)self.assertEqual(conn.server_protocol.side, \"\")self.assertEqual(conn.server_protocol.connected, True)self.assertEqual(conn.server_protocol.disconnected_reason, None)self.assertEqual(conn.connected, True)self.assertEqual(conn.pending, False)self.successResultOf(conn._accept_d)self.successResultOf(conn._connected_d)self.assertNoResult(conn._finished_d)self.assertEqual(conn.client_protocol, client)", "docstring": "Assert that a connection is connected to a client.", "id": "f1155:c2:m3"} {"signature": "def patch_transport_abortConnection(transport, protocol):", "body": "def abortConnection():protocol._fake_connection_aborted = Truetransport.loseConnection()patch_if_missing(transport, '', abortConnection)", "docstring": "Patch abortConnection() onto the transport if it doesn't already have it.\n(`Agent` assumes its transport has this.)", "id": "f1156:m4"} {"signature": "def wait0(r=None):", "body": "from twisted.internet import reactorreturn deferLater(reactor, , lambda: r)", "docstring": "Wait zero seconds to give the reactor a chance to work.\n\nReturns its (optional) argument, so it's useful as a callback.", "id": "f1156:m0"} {"signature": "@propertydef endpoint(self):", "body": "return FakeServerEndpoint(self)", "docstring": "Get an endpoint that connects clients to this server.", "id": "f1156:c1:m2"} {"signature": "def reject_connection(self, reason=None):", "body": "assert self.pending, \"\"if reason is None:reason = ConnectionRefusedError()self._accept_d.errback(reason)", "docstring": "Reject a pending connection.", "id": "f1156:c2:m5"} 
{"signature": "def await_finished(self):", "body": "return self._finished_d", "docstring": "Wait for the both sides of the connection to close.", "id": "f1156:c2:m6"} {"signature": "def row_to_dict(self, row, allele, alternate_alleles):", "body": "def _variant_sbid(**kwargs):\"\"\"\"\"\"return ''.format(**kwargs).upper()if allele == '':allele = row.REF or allelegenomic_coordinates = {'': self.genome_build,'': row.CHROM,'': row.POS,'': row.POS + len(row.REF) - }variant_sbid = _variant_sbid(allele=allele,**genomic_coordinates)return {'': genomic_coordinates,'': variant_sbid,'': allele,'': row.ID,'': row.REF,'': alternate_alleles,'': self._parse_info(row.INFO),'': row.QUAL,'': row.FILTER}", "docstring": "Return a parsed dictionary for JSON.", "id": "f1158:c1:m10"} {"signature": "def next(self):", "body": "def _alt(alt):\"\"\"\"\"\"if not alt:return ''else:return str(alt)if not self._next:row = next(self.reader)alternate_alleles = list(map(_alt, row.ALT))for allele in alternate_alleles:self._next.append(self.row_to_dict(row,allele=allele,alternate_alleles=alternate_alleles))self._line_number += return self._next.pop()", "docstring": "Expands multiple alleles into one record each\nusing an internal buffer (_next).", "id": "f1158:c1:m9"} {"signature": "@app.callback(dash.dependencies.Output('', ''),[dash.dependencies.Input('', '')])def display_page(pathname):", "body": "return layout()", "docstring": "Render the layout depending on the pathname.", "id": "f1166:m2"} {"signature": "def logout(self):", "body": "if self._oauth_client_secret:try:oauth_token = flask.request.cookies[self.TOKEN_COOKIE_NAME]requests.post(urljoin(self._api_host, self.OAUTH2_REVOKE_TOKEN_PATH),data={'': self._oauth_client_id,'': self._oauth_client_secret,'': oauth_token})except:passresponse = flask.redirect('')self.clear_cookies(response)return response", "docstring": "Revoke the token and remove the cookie.", "id": "f1167:c0:m5"} {"signature": "def get(self, url, params, **kwargs):", "body": "kwargs[''] = paramsreturn self.request('', url, **kwargs)", "docstring": "Issues an HTTP GET across the wire via the Python requests\n library. See *request()* for information on keyword args.", "id": "f1170:c1:m4"} {"signature": "def delete(self, url, data, **kwargs):", "body": "kwargs[''] = datareturn self.request('', url, **kwargs)", "docstring": "Issues an HTTP DELETE across the wire via the Python requests\n library. 
See *request* for information on keyword args.", "id": "f1170:c1:m6"} {"signature": "def _mediawiki_cell_attrs(row, colaligns):", "body": "alignment = {\"\": '',\"\": '',\"\": '',\"\": ''}row2 = [alignment[a] + c for c, a in zip(row, colaligns)]return row2", "docstring": "Prefix every cell in a row with an HTML alignment attribute.", "id": "f1197:m19"} {"signature": "def _column_type(strings, has_invisible=True):", "body": "types = [_type(s, has_invisible) for s in strings]return reduce(_more_generic, types, int)", "docstring": "The least generic type all column values are convertible to.\n\n>>> _column_type([\"1\", \"2\"]) is _int_type\nTrue\n>>> _column_type([\"1\", \"2.3\"]) is _float_type\nTrue\n>>> _column_type([\"1\", \"2.3\", \"four\"]) is _text_type\nTrue\n>>> _column_type([\"four\", '\\u043f\\u044f\\u0442\\u044c']) is _text_type\nTrue\n>>> _column_type([None, \"brux\"]) is _text_type\nTrue\n>>> _column_type([1, 2, None]) is _int_type\nTrue", "id": "f1197:m13"} {"signature": "def _format(val, valtype, floatfmt, missingval=\"\"):", "body": "if val is None:return missingvalif valtype in [int, _binary_type, _text_type]:return \"\".format(val)elif valtype is float:return format(float(val), floatfmt)else:return \"\".format(val)", "docstring": "Format a value accoding to its type.\n\nUnicode is supported:\n\n>>> hrow = ['\\u0431\\u0443\\u043a\\u0432\\u0430', \\\n '\\u0446\\u0438\\u0444\\u0440\\u0430'] ; \\\n tbl = [['\\u0430\\u0437', 2], ['\\u0431\\u0443\\u043a\\u0438', 4]] ; \\\n good_result = '\\\\u0431\\\\u0443\\\\u043a\\\\u0432\\\\u0430 \\\n \\\\u0446\\\\u0438\\\\u0444\\\\u0440\\\\u0430\\\\n-------\\\n -------\\\\n\\\\u0430\\\\u0437 \\\n 2\\\\n\\\\u0431\\\\u0443\\\\u043a\\\\u0438 4' ; \\\n tabulate(tbl, headers=hrow) == good_result\nTrue", "id": "f1197:m14"} {"signature": "def _normalize_tabular_data(tabular_data, headers, sort=True):", "body": "if hasattr(tabular_data, \"\") and hasattr(tabular_data, \"\"):if hasattr(tabular_data.values, \"\"):keys = list(tabular_data.keys())rows = list(izip_longest(*list(tabular_data.values())))elif hasattr(tabular_data, \"\"):keys = list(tabular_data.keys())vals = tabular_data.valuesnames = tabular_data.indexrows = [[v] + list(row) for v, row in zip(names, vals)]else:raise ValueError(\"\"\"\")if headers == \"\":headers = list(map(_text_type, keys)) else: rows = list(tabular_data)if headers == \"\" and len(rows) > : headers = list(map(_text_type, list(range(len(rows[])))))if headers == \"\" and len(rows) > :headers = list(map(_text_type, rows[])) rows = rows[:]headers = list(headers)rows = list(map(list, rows))if sort and len(rows) > :rows = sorted(rows, key=lambda x: x[])if headers and len(rows) > :nhs = len(headers)ncols = len(rows[])if nhs < ncols:headers = [\"\"] * (ncols - nhs) + headersreturn rows, headers", "docstring": "Transform a supported data type to a list of lists, and a list of headers.\n\nSupported tabular data types:\n\n* list-of-lists or another iterable of iterables\n\n* 2D NumPy arrays\n\n* dict of iterables (usually used with headers=\"keys\")\n\n* pandas.DataFrame (usually used with headers=\"keys\")\n\nThe first row can be used as headers if headers=\"firstrow\",\ncolumn indices can be used as headers if headers=\"keys\".", "id": "f1197:m16"} {"signature": "def _padleft(width, s, has_invisible=True):", "body": "iwidth = width + len(s) - len(_strip_invisible(s))if has_invisible else widthfmt = \"\" % iwidthreturn fmt.format(s)", "docstring": "Flush right.\n\n>>> _padleft(6, '\\u044f\\u0439\\u0446\\u0430') \\\n == ' 
\\u044f\\u0439\\u0446\\u0430'\nTrue", "id": "f1197:m6"} {"signature": "def _line_segment_with_colons(linefmt, align, colwidth):", "body": "fill = linefmt.hlinew = colwidthif align in [\"\", \"\"]:return (fill[] * (w - )) + \"\"elif align == \"\":return \"\" + (fill[] * (w - )) + \"\"elif align == \"\":return \"\" + (fill[] * (w - ))else:return fill[] * w", "docstring": "Return a segment of a horizontal line with optional colons which\n indicate column's alignment (as in `pipe` output format).", "id": "f1197:m20"} {"signature": "def _type(string, has_invisible=True):", "body": "if has_invisible and(isinstance(string, _text_type) or isinstance(string, _binary_type)):string = _strip_invisible(string)if string is None:return _none_typeelif _isint(string):return _int_typeelif _isnumber(string):return floatelif isinstance(string, _binary_type):return _binary_typeelse:return _text_type", "docstring": "The least generic type (type(None), int, float, str, unicode).\n\n>>> _type(None) is type(None)\nTrue\n>>> _type(\"foo\") is type(\"\")\nTrue\n>>> _type(\"1\") is type(1)\nTrue\n>>> _type('\\x1b[31m42\\x1b[0m') is type(42)\nTrue\n>>> _type('\\x1b[31m42\\x1b[0m') is type(42)\nTrue", "id": "f1197:m4"} {"signature": "def _build_line(colwidths, padding, begin, fill, sep, end):", "body": "cells = [fill * (w + * padding) for w in colwidths]return _build_row(cells, , begin, sep, end)", "docstring": "Return a string which represents a horizontal line.", "id": "f1197:m18"} {"signature": "def naturalsize(value, binary=False, gnu=False, format=''):", "body": "if gnu:suffix = suffixes['']elif binary:suffix = suffixes['']else:suffix = suffixes['']base = if (gnu or binary) else bytes = float(value)if bytes == and not gnu:return ''elif bytes < base and not gnu:return '' % byteselif bytes < base and gnu:return '' % bytesfor i, s in enumerate(suffix):unit = base ** (i + )if bytes < unit and not gnu:return (format + '') % ((base * bytes / unit), s)elif bytes < unit and gnu:return (format + '') % ((base * bytes / unit), s)if gnu:return (format + '') % ((base * bytes / unit), s)return (format + '') % ((base * bytes / unit), s)", "docstring": "Format a number of byteslike a human readable filesize (eg. 10 kB). By\n default, decimal suffixes (kB, MB) are used. Passing binary=true will use\n binary suffixes (KiB, MiB) are used and the base will be 2**10 instead of\n 10**3. 
If ``gnu`` is True, the binary argument is ignored and GNU-style\n (ls -sh style) prefixes are used (K, M) with the 2**10 definition.\n Non-gnu modes are compatible with jinja2's ``filesizeformat`` filter.", "id": "f1200:m0"} {"signature": "def evaluate(self, data=None, data_type='', is_list=False):", "body": "payload = {'': data,'': self.expr,'': data_type,'': is_list}res = self._client.post('', payload)return res['']", "docstring": "Evaluates the expression with the provided context and format.", "id": "f1201:c1:m1"} {"signature": "def _combine(self, other, conn=''):", "body": "f = Filter()f.deepcopy = self.deepcopy and other.filtersif f.deepcopy:self_filters = copy.deepcopy(self.filters)other_filters = copy.deepcopy(other.filters)else:self_filters = self.filtersother_filters = other.filtersif not self.filters:f.filters = other_filterselif not other.filters:f.filters = self_filterselif conn in self.filters[]:f.filters = self_filtersf.filters[][conn].extend(other_filters)elif conn in other.filters[]:f.filters = other_filtersf.filters[][conn].extend(self_filters)else:f.filters = [{conn: self_filters + other_filters}]return f", "docstring": "OR and AND will create a new Filter, with the filters from both Filter\nobjects combined with the connector `conn`.", "id": "f1202:c0:m2"} {"signature": "def next(self):", "body": "if not hasattr(self, ''):self.__iter__()if self._cursor == len(self):raise StopIteration()if self._buffer_idx == len(self._buffer):self.execute(self._page_offset + self._buffer_idx)self._buffer_idx = self._cursor += self._buffer_idx += return self._buffer[self._buffer_idx - ]", "docstring": "Allows the Query object to be an iterable.\n\nThis method will iterate through a cached result set\nand fetch successive pages as required.\n\nA `StopIteration` exception will be raised when there aren't\nany more results available or when the requested result\nslice range or limit has been fetched.\n\nReturns: The next result.", "id": "f1202:c2:m19"} {"signature": "def __init__(self,dataset_id,query=None,genome_build=None,filters=None,fields=None,exclude_fields=None,entities=None,ordering=None,limit=float(''),page_size=DEFAULT_PAGE_SIZE,result_class=dict,debug=False,error=None,**kwargs):", "body": "self._dataset_id = dataset_idself._data_url = ''.format(dataset_id)self._query = queryself._genome_build = genome_buildself._result_class = result_classself._fields = fieldsself._exclude_fields = exclude_fieldsself._entities = entitiesself._ordering = orderingself._debug = debugself._error = errorif filters:if isinstance(filters, Filter):filters = [filters]else:filters = []self._filters = filtersself._response = Noneself._limit = limitself._page_size = int(page_size)self._page_offset = Noneself._slice = Noneif self._limit < :raise Exception('')if self._page_size <= :raise Exception('')self._client = kwargs.get('') or self._client or client", "docstring": "Creates a new Query object.\n\n:Parameters:\n - `dataset_id`: Unique ID of dataset to query.\n - `query` (optional): An optional query string.\n - `genome_build`: The genome build to use for the query.\n - `result_class` (optional): Class of object returned by query.\n - `fields` (optional): List of specific fields to retrieve.\n - `exclude_fields` (optional): List of specific fields to exclude.\n - `entities` (optional): List of entity tuples to filter on.\n - `ordering` (optional): List of fields to order the results by.\n - `filters` (optional): Filter or List of filter objects.\n - `limit` (optional): Maximum number of query results to 
return.\n - `page_size` (optional): Number of results to fetch per query page.\n - `debug` (optional): Sends debug information to the API.", "id": "f1202:c2:m0"} {"signature": "def count(self):", "body": "return self.total", "docstring": "Returns the total number of results returned by a query.\nThe count is dependent on the filters, but independent of any limit.\nIt is like SQL:\n SELECT COUNT(*) FROM [WHERE condition].\nSee also __len__ for a function that is dependent on limit.", "id": "f1202:c2:m6"} {"signature": "@classmethoddef _process_filters(cls, filters):", "body": "data = []for f in filters:if isinstance(f, Filter):if f.filters:data.extend(cls._process_filters(f.filters))elif isinstance(f, dict):key = list(f.keys())[]val = f[key]if isinstance(val, dict):filter_filters = cls._process_filters([val])if len(filter_filters) == :filter_filters = filter_filters[]data.append({key: filter_filters})else:data.append({key: cls._process_filters(val)})else:data.extend((f,))return data", "docstring": "Takes a list of filters and returns JSON\n\n :Parameters:\n - `filters`: List of Filters, (key, val) tuples, or dicts\n\n Returns: List of JSON API filters", "id": "f1202:c2:m11"} {"signature": "def migrate(self, target, follow=True, **kwargs):", "body": "from solvebio import Datasetfrom solvebio import DatasetMigrationif isinstance(target, Dataset):target_id = target.idelse:target_id = targetlimit = kwargs.pop('', None)if not limit and self._limit < float(''):limit = self._limitparams = self._build_query(limit=limit)params.pop('', None)params.pop('', None)migration = DatasetMigration.create(source_id=self._dataset_id,target_id=target_id,source_params=params,client=self._client,**kwargs)if follow:migration.follow()return migration", "docstring": "Migrate the data from the Query to a target dataset.\n\nValid optional kwargs include:\n\n* target_fields\n* include_errors\n* validation_params\n* metadata\n* commit_mode", "id": "f1202:c2:m23"} {"signature": "def filter(self, *filters, **kwargs):", "body": "f = list(filters)if kwargs:f += [Filter(**kwargs)]return self._clone(filters=f)", "docstring": "Returns this Query instance with the query args combined with\nexisting set with AND.\n\nkwargs are simply passed to a new Filter object and combined to any\nother filters with AND.\n\nBy default, everything is combined using AND. If you provide\nmultiple filters in a single filter call, those are ANDed\ntogether. If you provide multiple filters in multiple filter\ncalls, those are ANDed together.\n\nIf you want something different, use the F class which supports\n``&`` (and), ``|`` (or) and ``~`` (not) operators. 
Then call\nfilter once with the resulting Filter instance.", "id": "f1202:c2:m3"} {"signature": "def __init__(self, queries, **kwargs):", "body": "if not isinstance(queries, list):queries = [queries]self._queries = queriesself._client = kwargs.get('') or self._client or client", "docstring": "Expects a list of Query objects.", "id": "f1202:c3:m0"} {"signature": "def logout(*args):", "body": "if get_credentials():delete_credentials()_print_msg('')_print_msg('')", "docstring": "Deletes the user's locally-stored credentials.", "id": "f1207:m4"} {"signature": "def print_user(user):", "body": "email = user['']domain = user['']['']role = user['']print(''''.format(domain, email, role))", "docstring": "Prints information about the current user.", "id": "f1207:m6"} {"signature": "def launch_ipython_shell(args): ", "body": "try:import IPython except ImportError:_print(\"\"\"\")return Falseif hasattr(IPython, \"\"):if IPython.version_info > (, , , ''):return launch_ipython_5_shell(args)_print(\"\".format(IPython.__version__))return launch_ipython_legacy_shell(args)", "docstring": "Open the SolveBio shell (IPython wrapper)", "id": "f1209:m1"} {"signature": "def all(self, **params):", "body": "return self.request('', self[''], params=params)", "docstring": "Lists all items in a class that you have access to", "id": "f1223:c1:m0"} {"signature": "def get_by_natural_key(self, email_pgp_pub_field):", "body": "return self.get(email_pgp_pub_field=email_pgp_pub_field)", "docstring": "Get by natural key of email pub field.", "id": "f1241:c1:m0"} {"signature": "def db_for_write(self, model, **hints):", "body": "if model._meta.app_label == '':return ''return ''", "docstring": "Write to diff_keys.", "id": "f1243:c0:m1"} {"signature": "def as_sql(self, compiler, connection):", "body": "sql, params = super(DecryptedCol, self).as_sql(compiler, connection)sql = self.target.get_decrypt_sql(connection) % (sql, self.target.get_cast_sql())return sql, params", "docstring": "Build SQL with decryption and casting.", "id": "f1244:c0:m1"} {"signature": "def db_type(self, connection=None):", "body": "return ''", "docstring": "Value stored in the database is hexadecimal.", "id": "f1244:c2:m1"} {"signature": "def get_col(self, alias, output_field=None):", "body": "if output_field is None:output_field = selfif alias != self.model._meta.db_table or output_field != self:return DecryptedCol(alias,self,output_field)else:return self.cached_col", "docstring": "Get the decryption for col.", "id": "f1244:c2:m5"} {"signature": "def pre_save(self, model_instance, add):", "body": "if self.original:original_value = getattr(model_instance, self.original)setattr(model_instance, self.attname, original_value)return super(HashMixin, self).pre_save(model_instance, add)", "docstring": "Save the original_value.", "id": "f1244:c1:m1"} {"signature": "def get_placeholder(self, value=None, compiler=None, connection=None):", "body": "return self.encrypt_sql.format(get_setting(connection, ''))", "docstring": "Tell postgres to encrypt this field using PGP.", "id": "f1244:c3:m0"} {"signature": "def get_placeholder(self, value, compiler, connection):", "body": "raise NotImplementedError('')", "docstring": "Tell postgres to encrypt this field using PGP.", "id": "f1244:c2:m2"} {"signature": "def as_sql(self, qn, connection):", "body": "lhs, lhs_params = self.process_lhs(qn, connection)rhs, rhs_params = self.process_rhs(qn, connection)params = lhs_params + rhs_paramsrhs = self.lhs.field.encrypt_sql % rhsreturn (''.format(lhs, rhs)), params", "docstring": 
"Responsible for creating the lookup with the digest SQL.\n\n Modify the right hand side expression to compare the value passed\n to a hash.", "id": "f1246:c0:m0"} {"signature": "@classmethoddef epgm(cls):", "body": "return '' + cls.pgm()", "docstring": "Generates available Encapsulated PGM address.", "id": "f1251:c0:m4"} {"signature": "@classmethoddef inproc(cls):", "body": "return ''.format(rand_str())", "docstring": "Generates random in-process address.", "id": "f1251:c0:m0"} {"signature": "def rpc(f=None, **kwargs):", "body": "if f is not None:if isinstance(f, six.string_types):if '' in kwargs:raise ValueError('')kwargs[''] = felse:return rpc(**kwargs)(f)return functools.partial(_rpc, **kwargs)", "docstring": "Marks a method as RPC.", "id": "f1254:m2"} {"signature": "def class_name(obj):", "body": "return type(obj).__name__", "docstring": "Returns the class name of the object.", "id": "f1256:m0"} {"signature": "def make_repr(obj, params=None, keywords=None, data=None, name=None,reprs=None):", "body": "opts = []if params is not None:opts.append(''.join(_repr_attr(obj, attr, data, reprs) for attr in params))if keywords is not None:opts.append(''.join('' % (attr, _repr_attr(obj, attr, data, reprs))for attr in keywords))if name is None:name = class_name(obj)return '' % (name, ''.join(opts))", "docstring": "Generates a string of object initialization code style. It is useful\n for custom __repr__ methods::\n\n class Example(object):\n\n def __init__(self, param, keyword=None):\n self.param = param\n self.keyword = keyword\n\n def __repr__(self):\n return make_repr(self, ['param'], ['keyword'])\n\n See the representation of example object::\n\n >>> Example('hello', keyword='world')\n Example('hello', keyword='world')", "id": "f1256:m2"} {"signature": "def work(self, socket, call, args, kwargs, topics=()):", "body": "task_id = uuid4_bytes()reply_socket, topics = self.replier(socket, topics, call.reply_to)if reply_socket:channel = (call.call_id, task_id, topics)else:channel = (None, None, None)f, rpc_spec = self.find_call_target(call)if rpc_spec.reject_if.__get__(self.app)(call, topics):reply_socket and self.reject(reply_socket, call.call_id, topics)returnreply_socket and self.accept(reply_socket, channel)success = Falsewith self.catch_exceptions():try:val = self.call(call, args, kwargs, f, rpc_spec)except:exc_info = sys.exc_info()self.raise_(reply_socket, channel, exc_info)reraise(*exc_info)success = Trueif not success:returnif isinstance(val, Iterator):vals = valwith self.catch_exceptions():try:try:val = next(vals)except StopIteration:passelse:self.send_reply(reply_socket, YIELD, val, *channel)for val in vals:self.send_reply(reply_socket, YIELD, val, *channel)self.send_reply(reply_socket, BREAK, None, *channel)except:exc_info = sys.exc_info()self.raise_(reply_socket, channel, exc_info)reraise(*exc_info)else:self.send_reply(reply_socket, RETURN, val, *channel)", "docstring": "Calls a function and send results to the collector. It supports\n all of function actions. 
A function could return, yield, raise any\n packable objects.", "id": "f1257:c1:m6"} {"signature": "def dispatch_reply(self, reply, value):", "body": "method = reply.methodcall_id = reply.call_idtask_id = reply.task_idif method & ACK:try:result_queue = self.result_queues[call_id]except KeyError:raise KeyError('')if method == ACCEPT:worker_info = valueresult = RemoteResult(self, call_id, task_id, worker_info)self.results[call_id][task_id] = resultresult_queue.put_nowait(result)elif method == REJECT:result_queue.put_nowait(None)else:result = self.results[call_id][task_id]result.set_reply(reply.method, value)", "docstring": "Dispatches the reply to the proper queue.", "id": "f1257:c5:m4"} {"signature": "def __call__(self):", "body": "poller = zmq.Poller()for socket in self.sockets:poller.register(socket, zmq.POLLIN)group = self.greenlet_groupmsgs = []capture = msgs.extenddef accept(socket, call, args, kwargs, topics):group.spawn(self.work, socket, call, args, kwargs, topics)group.join()def reject(socket, call, topics):__, call_id, reply_to, __, __ = callreply_socket, topics = self.replier(socket, topics, reply_to)self.reject(reply_socket, call_id, topics)def reject_if(call, topics):if self.reject_if(call, topics):return Truetry:__, rpc_spec = self.find_call_target(call)except KeyError:return Truereturn rpc_spec.reject_if.__get__(self.app)(call, topics)try:while True:for socket, event in safe(poller.poll):assert event & zmq.POLLINdel msgs[:]try:header, payload, topics = recv(socket, capture=capture)call = parse_call(header)if group.full() or reject_if(call, topics):reject(socket, call, topics)continueargs, kwargs = self.unpack(payload)except:handle = self.malformed_message_handlerif handle is not None:exc_info = sys.exc_info()handle(self, exc_info, msgs[:])del handlecontinueaccept(socket, call, args, kwargs, topics)try:del header, payload, topicsdel calldel args, kwargsexcept UnboundLocalError:passfinally:group.kill()", "docstring": "Runs the worker. While running, an RPC service is online.", "id": "f1257:c1:m5"} {"signature": "def reject(self, reply_socket, call_id, topics=()):", "body": "info = self.info or b''self.send_raw(reply_socket, REJECT, info, call_id, b'', topics)", "docstring": "Sends REJECT reply.", "id": "f1257:c1:m10"} {"signature": "def default_exception_handler(worker, exc_info):", "body": "reraise(*exc_info)", "docstring": "The default exception handler for :class:`Worker`. 
It just raises\n the given ``exc_info``.", "id": "f1257:m0"} {"signature": "def hBool(pyVal):", "body": "return BOOL.fromPy(pyVal)", "docstring": "create hdl bool value (for example bool value in vhdl)", "id": "f1262:m1"} {"signature": "def areValues(*items):", "body": "for i in items:if not isinstance(i, Value):return Falsereturn True", "docstring": ":return: True if all arguments are instances of Value class else False", "id": "f1263:m0"} {"signature": "def _reinterpret_cast(self, toT):", "body": "return self._dtype.reinterpret_cast(self, toT)", "docstring": "type cast to type of same size", "id": "f1263:c0:m3"} {"signature": "def __init__(self, val, dtype, vldMask, updateTime=-):", "body": "self.val = valself._dtype = dtypeself.vldMask = vldMaskself.updateTime = updateTime", "docstring": ":param val: pythonic value representing this value\n:param dtype: data type object from which this value was derived\n:param vldMask: validity mask for value\n:param updateTime: simulation time when this value was created", "id": "f1263:c0:m0"} {"signature": "@internaldef reinterpret_bits_to_hstruct(sigOrVal, hStructT):", "body": "container = hStructT.fromPy(None)offset = for f in hStructT.fields:t = f.dtypewidth = t.bit_length()if f.name is not None:s = sigOrVal[(width + offset):offset]s = s._reinterpret_cast(t)setattr(container, f.name, s)offset += widthreturn container", "docstring": "Reinterpret signal of type Bits to signal of type HStruct", "id": "f1268:m2"} {"signature": "def __setitem__(self, index, value):", "body": "if isinstance(index, int):index = INT.fromPy(index)else:assert isinstance(self, Value)assert index._dtype == INT, index._dtypeif not isinstance(value, Value):value = self._dtype.elmType.fromPy(value)else:assert value._dtype == self._dtype.elmType, (value._dtype, self._dtype.elmType)return self._setitem__val(index, value)", "docstring": "Only syntax sugar for user, not used inside HWT\n\n* In HW design is not used (__getitem__ returns \"reference\"\n and it is used)\n\n* In simulator is used _setitem__val directly", "id": "f1269:c0:m7"} {"signature": "@internaldef _getitem__val(self, key):", "body": "try:kv = key.valif not key._isFullVld():raise KeyError()else:if kv >= self._dtype.size:raise KeyError()return self.val[kv].clone()except KeyError:return self._dtype.elmType.fromPy(None)", "docstring": ":attention: this will clone item from array, iterate over .val\n if you need to modify items", "id": "f1269:c0:m4"} {"signature": "def walkFlattenFields(sigOrVal, skipPadding=True):", "body": "t = sigOrVal._dtypeif isinstance(t, Bits):yield sigOrValelif isinstance(t, HUnion):yield from walkFlattenFields(sigOrVal._val, skipPadding=skipPadding)elif isinstance(t, HStruct):for f in t.fields:isPadding = f.name is Noneif not isPadding or not skipPadding:if isPadding:v = f.dtype.fromPy(None)else:v = getattr(sigOrVal, f.name)yield from walkFlattenFields(v)elif isinstance(t, HArray):for item in sigOrVal:yield from walkFlattenFields(item)else:raise NotImplementedError(t)", "docstring": "Walk all simple values in HStruct or HArray", "id": "f1270:m1"} {"signature": "@internaldef slice_to_SLICE(sliceVals, width):", "body": "if sliceVals.step is not None:raise NotImplementedError()start = sliceVals.startstop = sliceVals.stopif sliceVals.start is None:start = INT.fromPy(width)else:start = toHVal(sliceVals.start)if sliceVals.stop is None:stop = INT.fromPy()else:stop = toHVal(sliceVals.stop)startIsVal = isinstance(start, Value)stopIsVal = isinstance(stop, Value)indexesAreValues = startIsVal and 
stopIsValif indexesAreValues:updateTime = max(start.updateTime, stop.updateTime)else:updateTime = -return Slice.getValueCls()((start, stop), SLICE, , updateTime)", "docstring": "convert python slice to value of SLICE hdl type", "id": "f1271:m0"} {"signature": "def _size(self):", "body": "assert isinstance(self, Value)return int(self.val[]) - int(self.val[])", "docstring": ":return: how many bits is this slice selecting", "id": "f1273:c0:m2"} {"signature": "def toPy(self):", "body": "return slice(int(self.val[]), int(self.val[]))", "docstring": "Convert to python slice object", "id": "f1273:c0:m1"} {"signature": "def __init__(self, name, valueNames):", "body": "super(HEnum, self).__init__()self.name = nameself._allValues = tuple(valueNames)for name in valueNames:v = self.fromPy(name)assert not hasattr(self, name)setattr(self, name, v)", "docstring": ":param name: name for this type\n:param valueNames: sequence of strings which will be used as names\n for enum members", "id": "f1275:c0:m0"} {"signature": "@internaldef domain_size(self):", "body": "return int( ** self.bit_length())", "docstring": ":return: how many values the specified type can have", "id": "f1275:c0:m3"} {"signature": "@classmethoddef fromPy(cls, val, typeObj, vldMask=None):", "body": "vld = int(val is not None)if not vld:assert vldMask is None or vldMask == val = Falseelse:if vldMask == :val = Falsevld = else:val = bool(val)return cls(val, typeObj, vld)", "docstring": ":param val: value of python type bool or None\n:param typeObj: instance of HdlType\n:param vldMask: None vldMask is resolved from val,\n if is 0 value is invalidated\n if is 1 value has to be valid", "id": "f1278:c0:m1"} {"signature": "@internaldef domain_size(self):", "body": "return << ", "docstring": ":return: how many values the specified type can have", "id": "f1279:c0:m3"} {"signature": "@classmethoddef fromPy(cls, val, typeObj, vldMask=None):", "body": "if vldMask == :val = Nonereturn cls(val, typeObj)", "docstring": ":param val: None or dict {field name: field value}\n:param typeObj: instance of String HdlType\n:param vldMask: if is None validity is resolved from val\n if is 0 value is invalidated\n if is 1 value has to be valid", "id": "f1286:c0:m2"} {"signature": "@classmethoddef fromPy(cls, val, typeObj, vldMask=None):", "body": "assert isinstance(val, str) or val is Nonevld = if val is None else if not vld:assert vldMask is None or vldMask == val = \"\"else:if vldMask == :val = \"\"vld = return cls(val, typeObj, vld)", "docstring": ":param val: python string or None\n:param typeObj: instance of String HdlType\n:param vldMask: if is None validity is resolved from val\n if is 0 value is invalidated\n if is 1 value has to be valid", "id": "f1289:c0:m0"} {"signature": "def bit_length(self):", "body": "try:itemSize = self.elmType.bit_lengthexcept AttributeError:itemSize = Noneif itemSize is None:raise TypeError(\"\"\"\")s = self.sizeif isinstance(s, RtlSignalBase):s = int(s.staticEval())return s * itemSize()", "docstring": ":return: bit width for this type", "id": "f1293:c0:m3"} {"signature": "def __getitem__(self, key):", "body": "assert int(key) > , key from hwt.hdl.types.array import HArrayreturn HArray(self, key)", "docstring": "[] operator to create an array of this type.", "id": "f1296:c0:m6"} {"signature": "@internal@classmethoddef getValueCls(cls):", "body": "raise NotImplementedError()", "docstring": ":attention: Overridden in implementation of concrete HdlType.\n\n:return: class for value derived from this type", "id": "f1296:c0:m5"} {"signature": "def 
statementsAreSame(statements: List[HdlStatement]) -> bool:", "body": "iterator = iter(statements)try:first = next(iterator)except StopIteration:return Truereturn all(first.isSame(rest) for rest in iterator)", "docstring": ":return: True if all statements are same", "id": "f1300:m4"} {"signature": "@internaldef _destroy(self):", "body": "ctx = self._get_rtl_context()for i in self._inputs:i.endpoints.discard(self)for o in self._outputs:o.drivers.remove(self)ctx.statements.remove(self)", "docstring": "Disconnect this statement from signals and delete it from RtlNetlist context\n\n:attention: signal endpoints/drivers will be altered\n that means they can not be used for iteration", "id": "f1300:c2:m24"} {"signature": "@internaldef _discover_enclosure(self) -> None:", "body": "raise NotImplementedError(\"\"\"\", self.__class__, self)", "docstring": "Discover all outputs for which this statement is enclosed and store them in the _enclosed_for property\n(has driver in all code branches)", "id": "f1300:c2:m4"} {"signature": "@internaldef _on_merge(self, other):", "body": "self._inputs.extend(other._inputs)self._outputs.extend(other._outputs)if self._sensitivity is not None:self._sensitivity.extend(other._sensitivity)else:assert other._sensitivity is Noneif self._enclosed_for is not None:self._enclosed_for.update(other._enclosed_for)else:assert other._enclosed_for is Noneother_was_top = other.parentStm is Noneif other_was_top:other._get_rtl_context().statements.remove(other)for s in other._inputs:s.endpoints.discard(other)s.endpoints.append(self)for s in other._outputs:s.drivers.discard(other)s.drivers.append(self)", "docstring": "After merging statements update IO, sensitivity and context\n\n:attention: rank is not updated", "id": "f1300:c2:m12"} {"signature": "@internal@staticmethoddef _merge_statement_lists(stmsA: List[\"\"], stmsB: List[\"\"])-> List[\"\"]:", "body": "if stmsA is None and stmsB is None:return Nonetmp = []a_it = iter(stmsA)b_it = iter(stmsB)a = Noneb = Nonea_empty = Falseb_empty = Falsewhile not a_empty and not b_empty:while not a_empty:a = next(a_it, None)if a is None:a_empty = Truebreakelif a.rank == :tmp.append(a)a = Noneelse:breakwhile not b_empty:b = next(b_it, None)if b is None:b_empty = Truebreakelif b.rank == :tmp.append(b)b = Noneelse:breakif a is not None or b is not None:a._merge_with_other_stm(b)tmp.append(a)a = Noneb = Nonereturn tmp", "docstring": "Merge two lists of statements into one\n\n:return: list of merged statements", "id": "f1300:c2:m18"} {"signature": "@internaldef _on_parent_event_dependent(self):", "body": "if not self._is_completly_event_dependent:self._is_completly_event_dependent = Truefor stm in self._iter_stms():stm._on_parent_event_dependent()", "docstring": "After parent statement becomes event dependent\npropagate event dependency flag to child statements", "id": "f1300:c2:m20"} {"signature": "def isSameStatementList(stmListA: List[HdlStatement],stmListB: List[HdlStatement]) -> bool:", "body": "if stmListA is stmListB:return Trueif stmListA is None or stmListB is None:return Falsefor a, b in zip(stmListA, stmListB):if not a.isSame(b):return Falsereturn True", "docstring": ":return: True if two lists of HdlStatement instances are same", "id": "f1300:m3"} {"signature": "def __init__(self, name, dtype, defVal=None, virtualOnly=False):", "body": "self.name = nameself._dtype = dtypeself.virtualOnly = virtualOnlyif defVal is None:defVal = dtype.fromPy(None)self.defVal = defValself._setDefValue()", "docstring": ":param name: name for better orientation in netlists\n (used 
only in serialization)\n:param dtype: data type of this signal\n:param defVal: value for initialization\n:param virtualOnly: flag indicates that this assignment is only\n virtual and should not be added into\n netlist, because it is only for internal notation", "id": "f1301:c0:m0"} {"signature": "@internaldef _discover_enclosure(self):", "body": "outputs = self._outputsself._ifTrue_enclosed_for = self._discover_enclosure_for_statements(self.ifTrue, outputs)elif_encls = self._elIfs_enclosed_for = []for _, stms in self.elIfs:e = self._discover_enclosure_for_statements(stms, outputs)elif_encls.append(e)self._ifFalse_enclosed_for = self._discover_enclosure_for_statements(self.ifFalse, outputs)assert self._enclosed_for is Noneencl = self._enclosed_for = set()for s in self._ifTrue_enclosed_for:enclosed = Truefor elif_e in elif_encls:if s not in elif_e:enclosed = Falsebreakif enclosed and s in self._ifFalse_enclosed_for:encl.add(s)", "docstring": "Doc on parent class :meth:`HdlStatement._discover_enclosure`", "id": "f1305:c0:m4"} {"signature": "@internaldef _merge_with_other_stm(self, other: \"\") -> None:", "body": "merge = self._merge_statement_listsself.ifTrue = merge(self.ifTrue, other.ifTrue)new_elifs = []for ((c, elifA), (_, elifB)) in zip(self.elIfs, other.elIfs):new_elifs.append((c, merge(elifA, elifB)))self.elIfs = new_elifsself.ifFalse = merge(self.ifFalse, other.ifFalse)other.ifTrue = []other.elIfs = []other.ifFalse = Noneself._on_merge(other)", "docstring": ":attention: statements have to be mergeable (to check use _is_mergable method)", "id": "f1305:c0:m11"} {"signature": "@internaldef _fill_enclosure(self, enclosure: Dict[RtlSignalBase, HdlStatement]):", "body": "pass", "docstring": "Enclosure is never required", "id": "f1309:c0:m4"} {"signature": "def __init__(self, src, dst, indexes=None, virtualOnly=False,parentStm=None,sensitivity=None,is_completly_event_dependent=False):", "body": "super(Assignment, self).__init__(parentStm,sensitivity,is_completly_event_dependent)self.src = srcisReal = not virtualOnlyif not isinstance(src, Value):self._inputs.append(src)if isReal:src.endpoints.append(self)self.dst = dstif not isinstance(dst, Value):self._outputs.append(dst)if isReal:dst.drivers.append(self)self.indexes = indexesif indexes:for i in indexes:if not isinstance(i, Value):self._inputs.append(i)if isReal:i.endpoints.append(self)self._instId = Assignment._nextInstId()if not virtualOnly:dst.ctx.statements.add(self)", "docstring": ":param dst: destination to assign to\n:param src: source which is assigned from\n:param indexes: description of index selector on dst\n (list of Index/Slice objects) (f.e. 
[[0], [1]] means dst[0][1])\n:param virtualOnly: flag indicates that this assignments\n is only virtual and should not be added into\n netlist, because it is only for internal notation", "id": "f1309:c0:m0"} {"signature": "@internaldef _on_parent_event_dependent(self):", "body": "if not self._is_completly_event_dependent:self._is_completly_event_dependent = True", "docstring": "After parrent statement become event dependent", "id": "f1309:c0:m6"} {"signature": "@internaldef seqEval(self):", "body": "self.dst._val = self.src.staticEval()", "docstring": "Sequentially evaluate this assignment", "id": "f1309:c0:m11"} {"signature": "@internaldef _loadFromHStream(self, dtype: HStream, bitAddr: int) -> int:", "body": "ch = TransTmpl(dtype.elmType, , parent=self, origin=self.origin)self.children.append(ch)return bitAddr + dtype.elmType.bit_length()", "docstring": "Parse HUnion type to this transaction template instance\n\n:return: address of it's end", "id": "f1310:c2:m5"} {"signature": "def __exit__(self, exc_type, exc_val, exc_tb):", "body": "self.actual = self.parents.pop()self.onParentNames.pop()", "docstring": "Move back to parent", "id": "f1310:c1:m3"} {"signature": "def _loadFromHType(self, dtype: HdlType, bitAddr: int) -> None:", "body": "self.bitAddr = bitAddrchildrenAreChoice = Falseif isinstance(dtype, Bits):ld = self._loadFromBitselif isinstance(dtype, HStruct):ld = self._loadFromHStructelif isinstance(dtype, HArray):ld = self._loadFromArrayelif isinstance(dtype, HStream):ld = self._loadFromHStreamelif isinstance(dtype, HUnion):ld = self._loadFromUnionchildrenAreChoice = Trueelse:raise TypeError(\"\", dtype)self.bitAddrEnd = ld(dtype, bitAddr)self.childrenAreChoice = childrenAreChoice", "docstring": "Parse any HDL type to this transaction template instance", "id": "f1310:c2:m6"} {"signature": "@internaldef _loadFromUnion(self, dtype: HdlType, bitAddr: int) -> int:", "body": "for field in dtype.fields.values():ch = TransTmpl(field.dtype, , parent=self, origin=field)self.children.append(ch)return bitAddr + dtype.bit_length()", "docstring": "Parse HUnion type to this transaction template instance\n\n:return: address of it's end", "id": "f1310:c2:m4"} {"signature": "@internaldef _loadFromBits(self, dtype: HdlType, bitAddr: int):", "body": "return bitAddr + dtype.bit_length()", "docstring": "Parse Bits type to this transaction template instance\n\n:return: address of it's end", "id": "f1310:c2:m2"} {"signature": "def walkFlatten(self, offset: int=,shouldEnterFn=_default_shouldEnterFn,otherObjItCtx: ObjIteratorCtx =_DummyIteratorCtx()) -> Generator[Union[Tuple[Tuple[int, int], ''], ''],None, None]:", "body": "t = self.dtypebase = self.bitAddr + offsetend = self.bitAddrEnd + offsetshouldEnter, shouldYield = shouldEnterFn(self)if shouldYield:yield ((base, end), self)if shouldEnter:if isinstance(t, Bits):passelif isinstance(t, HStruct):for ch in self.children:with otherObjItCtx(ch.origin.name):yield from ch.walkFlatten(offset,shouldEnterFn,otherObjItCtx)elif isinstance(t, HArray):itemSize = (self.bitAddrEnd - self.bitAddr) // self.itemCntfor i in range(self.itemCnt):with otherObjItCtx(i):yield from self.children.walkFlatten(base + i * itemSize,shouldEnterFn,otherObjItCtx)elif isinstance(t, HUnion):yield OneOfTransaction(self, offset, shouldEnterFn,self.children)elif isinstance(t, HStream):assert len(self.children) == yield StreamTransaction(self, offset, shouldEnterFn,self.children[])else:raise TypeError(t)", "docstring": "Walk fields in instance of TransTmpl\n\n:param offset: optional offset 
for all children in this TransTmpl\n:param shouldEnterFn: function (transTmpl) which returns True\n when field should be split on it's children\n:param shouldEnterFn: function(transTmpl) which should return\n (shouldEnter, shouldUse) where shouldEnter is flag that means\n iterator should look inside of this actual object\n and shouldUse flag means that this field should be used\n (=generator should yield it)\n:return: generator of tuples ((startBitAddress, endBitAddress),\n TransTmpl instance)", "id": "f1310:c2:m9"} {"signature": "def _iter_stms(self):", "body": "for _, stms in self.cases:yield from stmsif self.default is not None:yield from self.default", "docstring": "Doc on parent class :meth:`HdlStatement._iter_stms`", "id": "f1311:c0:m6"} {"signature": "@internaldef _try_reduce(self) -> Tuple[List[\"\"], bool]:", "body": "io_change = Falsenew_cases = []for val, statements in self.cases:_statements, rank_decrease, _io_change = self._try_reduce_list(statements)io_change |= _io_changeself.rank -= rank_decreasenew_cases.append((val, _statements))self.cases = new_casesif self.default is not None:self.default, rank_decrease, _io_change = self._try_reduce_list(self.default)self.rank -= rank_decreaseio_change |= _io_changereduce_self = not self._condHasEffect()if reduce_self:if self.cases:res = self.cases[][]elif self.default is not None:res = self.defaultelse:res = []else:res = [self, ]self._on_reduce(reduce_self, io_change, res)if not self.default:t = self.switchOn._dtypeif isinstance(t, HEnum):dom_size = t.domain_size()val_cnt = len(t._allValues)if len(self.cases) == val_cnt and val_cnt < dom_size:_, stms = self.cases.pop()self.default = stmsreturn res, io_change", "docstring": "Doc on parent class :meth:`HdlStatement._try_reduce`", "id": "f1311:c0:m9"} {"signature": "def getBusWordBitRange(self) -> Tuple[int, int]:", "body": "offset = self.startOfPart % self.parent.wordWidthreturn (offset + self.bit_length(), offset)", "docstring": ":return: bit range which contains data of this part on bus data signal", "id": "f1312:c0:m2"} {"signature": "def bit_length(self) -> int:", "body": "return self.endOfPart - self.startOfPart", "docstring": ":return: bit length of this part", "id": "f1312:c0:m1"} {"signature": "def getFieldBitRange(self) -> Tuple[int, int]:", "body": "offset = self.inFieldOffsetreturn (self.bit_length() + offset, offset)", "docstring": ":return: bit range which contains data of this part on interface\n of field", "id": "f1312:c0:m3"} {"signature": "@internaldef _wordIndx(self, addr: int):", "body": "return floor(addr / self.wordWidth)", "docstring": "convert bit address to index of word where this address is", "id": "f1313:c0:m2"} {"signature": "def __init__(self, origin, wordWidth, startBitAddr, endBitAddr,transParts):", "body": "self.origin = originself.wordWidth = wordWidthassert startBitAddr <= endBitAddrself.startBitAddr = startBitAddrself.endBitAddr = endBitAddrself.parts = transPartsself._fieldToTPart = Nonefor p in self.parts:p.parent = selfassert p.startOfPart >= startBitAddr, (p, startBitAddr)assert p.endOfPart <= endBitAddr, (p, endBitAddr)", "docstring": ":param origin: instance of HType (usually HStruct)\n from which this FrameTmpl was generated from\n:param wordWidth: width of word on interface\n where this template should be used\n:param startBitAddr: bit offset where this frame starts\n:param endBitAddr: bit offset where this frame ends\n (bit index of first bit behind this frame)\n:param transParts: instances of TransPart which are parts of this frame", "id": 
"f1313:c0:m0"} {"signature": "def fullWordCnt(self, start: int, end: int):", "body": "assert end >= start, (start, end)gap = max(, (end - start) - (start % self.wordWidth))return gap // self.wordWidth", "docstring": "Count of complete words between two addresses", "id": "f1315:c4:m1"} {"signature": "@internaldef iterSort(iterators, cmpFn):", "body": "actual = []_iterators = []for i, it in enumerate(iterators):try:a = next(it)_iterators.append((i, it))actual.append(a)except StopIteration:continuewhile True:if not _iterators:returnelif len(_iterators) == :originIndex, it = _iterators[]yield originIndex, actual[]for item in it:yield originIndex, itemreturnminimum = NoneminimumIndex = NonesecondMin = Nonefor i, val in enumerate(actual):skipSecMinCheck = Falseif minimum is None:minimum = valminimumIndex = ielif cmpFn(val, minimum):secondMin = minimumminimum = valminimumIndex = iskipSecMinCheck = Trueelif not skipSecMinCheck and (secondMin is None or cmpFn(val, secondMin)):secondMin = valactualI, actualIt = _iterators[minimumIndex]while not cmpFn(secondMin, minimum):yield (actualI, minimum)try:minimum = next(actualIt)except StopIteration:minimum = Nonebreakif minimum is None:del _iterators[minimumIndex]del actual[minimumIndex]else:actual[minimumIndex] = minimum", "docstring": "Sort items from iterators(generators) by alwas selecting item\nwith lowest value (min first)\n\n:return: generator of tuples (origin index, item) where origin index\n is index of iterator in \"iterators\" from where item commes from", "id": "f1315:m0"} {"signature": "def groupIntoChoices(splitsOnWord, wordWidth: int, origin: OneOfTransaction):", "body": "def cmpWordIndex(a, b):return a.startOfPart // wordWidth < b.startOfPart // wordWidthactual = NoneitCnt = len(splitsOnWord)for i, item in iterSort(splitsOnWord, cmpWordIndex):_actualW = item.startOfPart // wordWidthif actual is None:actual = ChoicesOfFrameParts(item.startOfPart, origin)actual.extend(ChoiceOfFrameParts(actual,origin.possibleTransactions[_i])for _i in range(itCnt))actualW = _actualWelif _actualW > actualW:actual.resolveEnd()yield actualactual = ChoicesOfFrameParts(item.startOfPart, origin)actual.extend(ChoiceOfFrameParts(actual,origin.possibleTransactions[_i])for _i in range(itCnt))actualW = _actualWactual[i].append(item)if actual is not None:actual.setIsLast(True)actual.resolveEnd()yield actual", "docstring": ":param splitsOnWord: list of lists of parts (fields splited on word\n boundaries)\n:return: generators of ChoicesOfFrameParts for each word\n which are not crossing word boundaries", "id": "f1315:m1"} {"signature": "def getBusWordBitRange(self) -> Tuple[int, int]:", "body": "offset = self.startOfPart % self.parent.wordWidthreturn (offset + self.bit_length(), offset)", "docstring": ":return: bit range which contains data of this part on bus data signal", "id": "f1315:c0:m2"} {"signature": "@internaldef staticEval(self):", "body": "for o in self.operands:o.staticEval()self.result._val = self.evalFn()", "docstring": "Recursively statistically evaluate result of this operator", "id": "f1316:c0:m2"} {"signature": "def eval(self, operator, simulator=None):", "body": "def getVal(v):while not isinstance(v, Value):v = v._valreturn voperands = list(map(getVal, operator.operands))if isEventDependentOp(operator.operator):operands.append(simulator.now)elif operator.operator == AllOps.IntToBits:operands.append(operator.result._dtype)return self._evalFn(*operands)", "docstring": "Load all operands and process them by self._evalFn", "id": "f1317:c0:m3"} {"signature": 
"def sensitivityByOp(op):", "body": "if op == AllOps.RISING_EDGE:return SENSITIVITY.RISINGelif op == AllOps.FALLING_EDGE:return SENSITIVITY.FALLINGelse:raise TypeError()", "docstring": "get sensitivity type for operator", "id": "f1317:m16"} {"signature": "def arr_any(iterable, fn):", "body": "for item in iterable:if fn(item):return Truereturn False", "docstring": ":return: True if fn(item) for any item else False", "id": "f1320:m2"} {"signature": "def find_files(directory, pattern, recursive=True):", "body": "if not os.path.isdir(directory):if os.path.exists(directory):raise IOError(directory + '')else:raise IOError(directory + \"\")if recursive:for root, _, files in os.walk(directory):for basename in files:if fnmatch.fnmatch(basename, pattern):filename = os.path.join(root, basename)yield filenameelse:root = directoryfor basename in os.listdir(root):if fnmatch.fnmatch(basename, pattern):filename = os.path.join(root, basename)if os.path.isfile(filename):yield filename", "docstring": "Find files by pattern in directory", "id": "f1322:m0"} {"signature": "def __init__(self, sigOrVal: Union[RtlSignal, Value],skipPadding: bool=True,fillup: bool=False):", "body": "self.it = walkFlattenFields(sigOrVal, skipPadding=skipPadding)self.fillup = fillupself.actuallyHave = self.actual = Noneself.actualOffset = ", "docstring": ":param skipPadding: if true padding is skipped in dense types", "id": "f1325:c2:m0"} {"signature": "def iterBits(sigOrVal: Union[RtlSignal, Value], bitsInOne: int=,skipPadding: bool=True, fillup: bool=False):", "body": "bw = BitWalker(sigOrVal, skipPadding, fillup)for _ in range(ceil(sigOrVal._dtype.bit_length() / bitsInOne)):yield bw.get(bitsInOne)bw.assertIsOnEnd()", "docstring": "Iterate over bits in vector\n\n:param sigOrVal: signal or value to iterate over\n:param bitsInOne: number of bits in one part\n:param skipPadding: if true padding is skipped in dense types", "id": "f1325:m2"} {"signature": "@internaldef _loadDeclarations(self):", "body": "if not hasattr(self, \"\"):self._interfaces = []if not hasattr(self, \"\"):self._private_interfaces = []if not hasattr(self, \"\"):self._units = []self._setAttrListener = self._declrCollectorself._declr()self._setAttrListener = Nonefor i in self._interfaces:self._loadInterface(i, True)for u in self._units:u._loadDeclarations()for p in self._params:p.setReadOnly()", "docstring": "Load all declarations from _decl() method, recursively\nfor all interfaces/units.", "id": "f1327:c0:m5"} {"signature": "@internal@classmethoddef _nextInstId(cls):", "body": "i = cls.__instCntrcls.__instCntr += return i", "docstring": "Get next instance id", "id": "f1328:c0:m1"} {"signature": "@internaldef statements_to_HWProcesses(statements: List[HdlStatement])-> Generator[HWProcess, None, None]:", "body": "statements = copy(statements)processes = []while statements:stm = statements.pop()proc_statements = [stm, ]ps = _statements_to_HWProcesses(proc_statements, True)processes.extend(ps)yield from reduceProcesses(processes)", "docstring": "Pack statements into HWProcess instances,\n* for each out signal resolve it's drivers and collect them\n* split statements if there is and combinational loop\n* merge statements if it is possible\n* resolve sensitivitilists\n* wrap into HWProcess instance\n* for every IO of process generate name if signal has not any", "id": "f1329:m3"} {"signature": "def _eq(self, other):", "body": "return self.naryOp(AllOps.EQ, tv(self)._eq, other)", "docstring": "__eq__ is not overloaded because it will destroy hashability of object", "id": 
"f1334:c0:m14"} {"signature": "@internaldef tryToMerge(procA: HWProcess, procB: HWProcess):", "body": "if (checkIfIsTooSimple(procA) orcheckIfIsTooSimple(procB) orareSetsIntersets(procA.outputs, procB.sensitivityList) orareSetsIntersets(procB.outputs, procA.sensitivityList) ornot HdlStatement._is_mergable_statement_list(procA.statements, procB.statements)):raise IncompatibleStructure()procA.statements = HdlStatement._merge_statement_lists(procA.statements, procB.statements)procA.outputs.extend(procB.outputs)procA.inputs.extend(procB.inputs)procA.sensitivityList.extend(procB.sensitivityList)return procA", "docstring": "Try merge procB into procA\n\n:raise IncompatibleStructure: if merge is not possible\n:attention: procA is now result if merge has succeed\n:return: procA which is now result of merge", "id": "f1336:m2"} {"signature": "@internaldef _registerArray(self, name, items):", "body": "items._parent = selfitems._name = namefor i, item in enumerate(items):setattr(self, \"\" % (name, i), item)", "docstring": "Register array of items on interface level object", "id": "f1339:c2:m13"} {"signature": "@internaldef _registerParameter(self, pName, parameter) -> None:", "body": "nameAvailabilityCheck(self, pName, parameter)try:hasName = parameter._name is not Noneexcept AttributeError:hasName = Falseif not hasName:parameter._name = pNameparameter._registerScope(pName, self)if parameter.hasGenericName:parameter.name = pNameif parameter._parent is None:parameter._parent = selfself._params.append(parameter)", "docstring": "Register Param object on interface level object", "id": "f1339:c2:m5"} {"signature": "@internaldef nameAvailabilityCheck(obj, propName, prop):", "body": "if getattr(obj, propName, None) is not None:raise IntfLvlConfErr(\"\" % (obj, propName, repr(getattr(obj, propName)), prop))", "docstring": "Check if not redefining property on obj", "id": "f1339:m0"} {"signature": "def _paramsShared(self, exclude=None, prefix=\"\") -> MakeParamsShared:", "body": "return MakeParamsShared(self, exclude=exclude, prefix=prefix)", "docstring": "Auto-propagate params by name to child components and interfaces\nUsage:\n\n.. 
code-block:: python\n\n with self._paramsShared():\n # your interfaces and unit which should share all params with \"self\" there\n\n:param exclude: params which should not be shared\n:param prefix: prefix which should be added to name of child parameters\n before parameter name matching", "id": "f1339:c2:m6"} {"signature": "def walkParams(intf, discovered):", "body": "for si in intf._interfaces:yield from walkParams(si, discovered)for p in intf._params:if p not in discovered:discovered.add(p)yield p", "docstring": "walk parameter instances on this interface", "id": "f1342:m1"} {"signature": "@internaldef _cleanAsSubunit(self):", "body": "for pi in self._entity.ports:pi.connectInternSig()for i in chain(self._interfaces, self._private_interfaces):i._clean()", "docstring": "Disconnect internal signals so unit can be reused by parent unit", "id": "f1346:c0:m2"} {"signature": "def _updateParamsFrom(self, otherObj, updater=_default_param_updater,exclude=None, prefix=\"\"):", "body": "PropDeclrCollector._updateParamsFrom(self, otherObj, updater, exclude, prefix)", "docstring": ":note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._updateParamsFrom`", "id": "f1348:c0:m10"} {"signature": "def _bit_length(self):", "body": "try:interfaces = self._interfacesexcept AttributeError:interfaces = Noneif interfaces is None:_intf = self._clone()_intf._loadDeclarations()interfaces = _intf._interfacesif interfaces:w = for i in interfaces:w += i._bit_length()return welse:return self._dtype.bit_length()", "docstring": "Sum of all width of interfaces in this interface", "id": "f1348:c0:m11"} {"signature": "def _getPhysicalName(self):", "body": "if hasattr(self, \"\"):return self._boundedEntityPort.nameelse:return self._getFullName().replace('', self._NAME_SEPARATOR)", "docstring": "Get name in HDL", "id": "f1348:c0:m7"} {"signature": "def __init__(self, masterDir=DIRECTION.OUT,loadConfig=True):", "body": "self._setAttrListener = Noneself._associatedClk = Noneself._associatedRst = Noneself._parent = Nonesuper().__init__()self._masterDir = masterDirself._direction = INTF_DIRECTION.UNKNOWNself._ctx = Noneif loadConfig:self._loadConfig()self._isExtern = Falseself._isAccessible = Trueself._ag = None", "docstring": "This constructor is called when constructing new interface,\nit is usually done manually while creating Unit or\nautomatically while extracting interfaces from UnitWithSoure\n\n:param masterDir: direction which this interface should have for master\n:param multiplyedBy: this can be instance of integer or Param,\n this mean the interface is array of the interfaces\n where multiplyedBy is the size\n:param loadConfig: do load config in __init__", "id": "f1348:c0:m0"} {"signature": "def _getFullName(self):", "body": "return HObjList._getFullName(self)", "docstring": "get all name hierarchy separated by '.", "id": "f1348:c0:m8"} {"signature": "def _make_association(self, *args, **kwargs):", "body": "for o in self:o._make_association(*args, **kwargs)", "docstring": "Delegate _make_association on items\n\n:note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._make_association`", "id": "f1349:c0:m3"} {"signature": "@internaldef _mkOp(fn):", "body": "def op(*operands, key=None) -> RtlSignalBase:\"\"\"\"\"\"assert operands, operandstop = Noneif key is not None:operands = map(key, operands)for s in operands:if top is None:top = selse:top = fn(top, s)return topreturn op", "docstring": "Function to create variadic operator function\n\n:param fn: function to perform binary operation", 
"id": "f1352:m2"} {"signature": "def __le__(self, other):", "body": "return self._sig.__le__(other)", "docstring": "<=", "id": "f1353:c0:m10"} {"signature": "def __ge__(self, other):", "body": "return self._sig.__ge__(other)", "docstring": ">=", "id": "f1353:c0:m9"} {"signature": "def __call__(self, source):", "body": "assert self._isAccessiblereturn self._sig(source)", "docstring": "connect this signal to driver\n\n:attention: it is not call of function it is operator of assignment\n:return: list of assignments", "id": "f1353:c0:m23"} {"signature": "def __and__(self, other):", "body": "return self._sig.__and__(other)", "docstring": "& operator - logical 'and' for one bit signals and hBool\nbitwise and for wider signals", "id": "f1353:c0:m12"} {"signature": "def onTWriteCallback__init(self, sim):", "body": "yield from self.onTWriteCallback(sim)self.intf.t._sigInside.registerWriteCallback(self.onTWriteCallback,self.getEnable)self.intf.o._sigInside.registerWriteCallback(self.onTWriteCallback,self.getEnable)", "docstring": "Process for injecting of this callback loop into simulator", "id": "f1359:c1:m5"} {"signature": "def getVld(self):", "body": "return self.intf.vld._sigInside", "docstring": "get \"valid\" signal", "id": "f1368:c0:m6"} {"signature": "def getRd(self):", "body": "return self.intf.rd._sigInside", "docstring": "get \"ready\" signal", "id": "f1368:c0:m3"} {"signature": "def driver(self, sim):", "body": "r = sim.readif self.actualData is NOP and self.data:self.actualData = self.data.popleft()doSend = self.actualData is not NOPif self.actualData is not self._lastWritten:if doSend:self.doWrite(sim, self.actualData)else:self.doWrite(sim, None)self._lastWritten = self.actualDataen = self.notReset(sim)vld = int(en and doSend)if self._lastVld is not vld:self.wrVld(sim.write, vld)self._lastVld = vldif not self._enabled:sim.add_process(self.checkIfRdWillBeValid(sim))returnyield sim.waitOnCombUpdate()rd = self.isRd(r)assert rd.vldMask, (sim.now, self.intf,\"\")if not vld:returnif rd.val:if self._debugOutput is not None:self._debugOutput.write(\"\" % (self.intf._getFullName(),sim.now,self.actualData))a = self.actualDataif self.data:self.actualData = self.data.popleft()else:self.actualData = NOPonDriverWriteAck = getattr(self, \"\", None)if onDriverWriteAck is not None:onDriverWriteAck(sim)onDone = getattr(a, \"\", None)if onDone is not None:onDone(sim)", "docstring": "Push data to interface\n\nset vld high and wait on rd in high then pass new data", "id": "f1368:c0:m13"} {"signature": "def pullUpAfter(sig, initDelay= * Time.ns):", "body": "def _pullDownAfter(s):s.write(False, sig)yield s.wait(initDelay)s.write(True, sig)return _pullDownAfter", "docstring": ":return: Simulation driver which keeps signal value low for initDelay then\n it sets value to 1", "id": "f1369:m1"} {"signature": "def propagateClkRst(obj):", "body": "clk = obj.clkrst = obj.rstfor u in obj._units:_tryConnect(clk, u, '')_tryConnect(~rst, u, '')_tryConnect(rst, u, '')", "docstring": "Propagate \"clk\" clock and reset \"rst\" signal to all subcomponents", "id": "f1372:m5"} {"signature": "def propagateRstn(obj):", "body": "rst_n = obj.rst_nfor u in obj._units:_tryConnect(rst_n, u, '')_tryConnect(~rst_n, u, '')", "docstring": "Propagate negative reset \"rst_n\" signal\nto all subcomponents", "id": "f1372:m6"} {"signature": "def propagateClk(obj):", "body": "clk = obj.clkfor u in obj._units:_tryConnect(clk, u, '')", "docstring": "Propagate \"clk\" clock signal to all subcomponents", "id": "f1372:m3"} {"signature": 
"@internaldef _serializeParamsUniq_eval(parentUnit, obj, isDeclaration, priv):", "body": "params = paramsToValTuple(parentUnit)if priv is None:priv = {}if isDeclaration:try:prevUnit = priv[params]except KeyError:priv[params] = parentUnitreturn True, privprepareEntity(obj, prevUnit._entity.name, prevUnit)return False, privreturn priv[params] is parentUnit, priv", "docstring": "Decide to serialize only objs with uniq parameters and class\n\n:param priv: private data for this function\n ({frozen_params: obj})\n\n:return: tuple (do serialize this object, next priv)", "id": "f1374:m8"} {"signature": "def registerMUX(self, stm: Union[HdlStatement, Operator], sig: RtlSignal,inputs_cnt: int):", "body": "assert inputs_cnt > res = self.resourcesw = sig._dtype.bit_length()k = (ResourceMUX, w, inputs_cnt)res[k] = res.get(k, ) + self.resource_for_object[(stm, sig)] = k", "docstring": "mux record is in format (self.MUX, n, m)\nwhere n is number of bits of this mux\nand m is number of possible inputs", "id": "f1385:c0:m2"} {"signature": "@internaldef serialzeValueToTCL(self, val, do_eval=False) -> Tuple[str, str, bool]:", "body": "if isinstance(val, int):val = hInt(val)if do_eval:val = val.staticEval()if isinstance(val, RtlSignalBase):ctx = VivadoTclExpressionSerializer.getBaseContext()tclVal = VivadoTclExpressionSerializer.asHdl(val, ctx)tclValVal = VivadoTclExpressionSerializer.asHdl(val.staticEval())return tclVal, tclValVal, Falseelse:tclVal = VivadoTclExpressionSerializer.asHdl(val, None)return tclVal, tclVal, True", "docstring": ":see: doc of method on parent class", "id": "f1386:c1:m16"} {"signature": "@internaldef getTypeWidth(self, dtype: HdlType, do_eval=False) -> Tuple[int, Union[int, RtlSignal], bool]:", "body": "width = dtype.widthif isinstance(width, int):widthStr = str(width)else:widthStr = self.getExprVal(width, do_eval=do_eval)return width, widthStr, False", "docstring": ":see: doc of method on parent class", "id": "f1386:c1:m14"} {"signature": "@classmethoddef HWProcess(cls, proc, ctx):", "body": "body = proc.statementsextraVars = []extraVarsSerialized = []hasToBeVhdlProcess = extraVars orarr_any(body,lambda x: isinstance(x,(IfContainer,SwitchContainer,WhileContainer,WaitStm)) or(isinstance(x, Assignment) andx.indexes))anyIsEventDependnt = arr_any(proc.sensitivityList, lambda s: isinstance(s, Operator))sensitivityList = sorted(map(lambda s: cls.sensitivityListItem(s, ctx,anyIsEventDependnt),proc.sensitivityList))if hasToBeVhdlProcess:childCtx = ctx.withIndent()else:childCtx = ctxdef createTmpVarFn(suggestedName, dtype):s = SignalItem(None, dtype, virtualOnly=True)s.name = childCtx.scope.checkedName(suggestedName, s)s.hidden = FalseserializedS = cls.SignalItem(s, childCtx, declaration=True)extraVars.append(s)extraVarsSerialized.append(serializedS)return schildCtx.createTmpVarFn = createTmpVarFnstatemets = [cls.asHdl(s, childCtx) for s in body]if hasToBeVhdlProcess:proc.name = ctx.scope.checkedName(proc.name, proc)extraVarsInit = []for s in extraVars:a = Assignment(s.defVal, s, virtualOnly=True)extraVarsInit.append(cls.Assignment(a, childCtx))return cls.processTmpl.render(indent=getIndent(ctx.indent),name=proc.name,hasToBeVhdlProcess=hasToBeVhdlProcess,extraVars=extraVarsSerialized,sensitivityList=\"\".join(sensitivityList),statements=extraVarsInit + statemets)", "docstring": "Serialize HWProcess objects", "id": "f1400:c0:m1"} {"signature": "@classmethoddef hardcodeRomIntoProcess(cls, rom):", "body": "processes = []signals = []for e in rom.endpoints:assert isinstance(e, Operator) and 
e.operator == AllOps.INDEX, eme, index = e.operandsassert me is romromValSig = rom.ctx.sig(rom.name, dtype=e.result._dtype)signals.append(romValSig)romValSig.hidden = Falsecases = [(toHVal(i), [romValSig(v), ])for i, v in enumerate(rom.defVal.val)]statements = [SwitchContainer(index, cases), ]for (_, (stm, )) in cases:stm.parentStm = statements[] p = HWProcess(rom.name, statements, {index, },{index, }, {romValSig, })processes.append(p)def replaceOrigRomIndexExpr(x):if x is e.result:return romValSigelse:return xfor _e in e.result.endpoints:_e.operands = tuple(map(replaceOrigRomIndexExpr, _e.operands))e.result = romValSigreturn processes, signals", "docstring": "Due to verilog restrictions it is not posible to use array constants\nand rom memories has to be hardcoded as process", "id": "f1404:c0:m2"} {"signature": "@classmethoddef Value(cls, val, ctx: SerializerCtx):", "body": "t = val._dtypeif isinstance(val, RtlSignalBase):return cls.SignalItem(val, ctx)c = cls.Value_try_extract_as_const(val, ctx)if c:return cif isinstance(t, Slice):return cls.Slice_valAsHdl(t, val, ctx)elif isinstance(t, HArray):return cls.HArrayValAsHdl(t, val, ctx)elif isinstance(t, Bits):return cls.Bits_valAsHdl(t, val, ctx)elif isinstance(t, HBool):return cls.Bool_valAsHdl(t, val, ctx)elif isinstance(t, HEnum):return cls.HEnumValAsHdl(t, val, ctx)elif isinstance(t, Integer):return cls.Integer_valAsHdl(t, val, ctx)elif isinstance(t, String):return cls.String_valAsHdl(t, val, ctx)else:raise SerializerException(\"\"% (val))", "docstring": ":param dst: is signal connected with value\n:param val: value object, can be instance of Signal or Value", "id": "f1414:c0:m0"} {"signature": "def getIndent(indentNum):", "body": "try:return _indentCache[indentNum]except KeyError:i = \"\".join([_indent for _ in range(indentNum)])_indentCache[indentNum] = ireturn i", "docstring": "Cached indent getter function", "id": "f1420:m0"} {"signature": "@classmethoddef serializationDecision(cls, obj, serializedClasses,serializedConfiguredUnits):", "body": "isDeclaration = isinstance(obj, Entity)isDefinition = isinstance(obj, Architecture)if isDeclaration:unit = obj.originelif isDefinition:unit = obj.entity.originelse:return Trueassert isinstance(unit, Unit)sd = unit._serializeDecisionif sd is None:return Trueelse:prevPriv = serializedClasses.get(unit.__class__, None)seriazlize, nextPriv = sd(unit, obj, isDeclaration, prevPriv)serializedClasses[unit.__class__] = nextPrivreturn seriazlize", "docstring": "Decide if this unit should be serialized or not eventually fix name\nto fit same already serialized unit\n\n:param obj: object to serialize\n:param serializedClasses: dict {unitCls : unitobj}\n:param serializedConfiguredUnits: (unitCls, paramsValues) : unitObj\n where paramsValues are named tuple name:value", "id": "f1421:c1:m6"} {"signature": "@internaldef ternaryOpsToIf(statements):", "body": "stms = []for st in statements:if isinstance(st, Assignment):try:if not isinstance(st.src, RtlSignalBase):raise DoesNotContainsTernary()d = st.src.singleDriver()if not isinstance(d, Operator) or d.operator != AllOps.TERNARY:raise DoesNotContainsTernary()else:ops = d.operandsifc = IfContainer(ops[],[Assignment(ops[], st.dst)],[Assignment(ops[], st.dst)])stms.append(ifc)continueexcept (MultipleDriversErr, DoesNotContainsTernary):passexcept NoDriverErr:assert (hasattr(st.src, \"\")and st.src._interface is not None)or st.src.defVal.vldMask, st.srcstms.append(st)return stms", "docstring": "Convert all ternary operators to IfContainers", "id": "f1426:m0"} 
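The ternaryOpsToIf record above (f1426:m0) rewrites assignments whose source is a ternary (mux) operator into explicit if/else statements before HDL serialization. The following standalone Python sketch illustrates the same rewrite on simplified placeholder classes; Assign, Ternary and If here are hypothetical stand-ins used only for illustration, not the hwt.hdl classes referenced in these records:

from dataclasses import dataclass
from typing import List, Union

@dataclass
class Ternary:          # models "cond ? if_true : if_false"
    cond: str
    if_true: str
    if_false: str

@dataclass
class Assign:           # models "dst = src"
    dst: str
    src: Union[str, Ternary]

@dataclass
class If:               # models "if cond: if_true else: if_false"
    cond: str
    if_true: List[Assign]
    if_false: List[Assign]

def ternary_ops_to_if(statements: List[Assign]) -> List[Union[Assign, If]]:
    # Replace "dst = cond ? a : b" assignments with if/else blocks,
    # leaving plain assignments untouched.
    out: List[Union[Assign, If]] = []
    for stm in statements:
        if isinstance(stm.src, Ternary):
            t = stm.src
            out.append(If(t.cond,
                          [Assign(stm.dst, t.if_true)],
                          [Assign(stm.dst, t.if_false)]))
        else:
            out.append(stm)
    return out

if __name__ == "__main__":
    stms = [Assign("y", Ternary("sel", "a", "b")), Assign("z", "c")]
    print(ternary_ops_to_if(stms))

Rewriting ternaries this way keeps a VHDL/Verilog serializer simple, since it then only has to emit a small set of statement types (assignments, if and switch blocks).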
{"signature": "@classmethoddef HWProcess(cls, proc, ctx):", "body": "body = proc.statementsextraVars = []extraVarsSerialized = []hasToBeVhdlProcess = arr_any(body,lambda x: isinstance(x,(IfContainer,SwitchContainer,WhileContainer,WaitStm)))sensitivityList = sorted(map(lambda s: cls.sensitivityListItem(s, ctx),proc.sensitivityList))if hasToBeVhdlProcess:childCtx = ctx.withIndent()else:childCtx = copy(ctx)def createTmpVarFn(suggestedName, dtype):s = RtlSignal(None, None, dtype, virtualOnly=True)s.name = ctx.scope.checkedName(suggestedName, s)s.hidden = FalseserializedS = cls.SignalItem(s, childCtx, declaration=True)extraVars.append(s)extraVarsSerialized.append(serializedS)return schildCtx.createTmpVarFn = createTmpVarFnstatemets = [cls.asHdl(s, childCtx) for s in body]proc.name = ctx.scope.checkedName(proc.name, proc)extraVarsInit = []for s in extraVars:if isinstance(s.defVal, RtlSignalBase) or s.defVal.vldMask:a = Assignment(s.defVal, s, virtualOnly=True)extraVarsInit.append(cls.Assignment(a, childCtx))else:assert s.drivers, sfor d in s.drivers:extraVarsInit.append(cls.asHdl(d, childCtx))_hasToBeVhdlProcess = hasToBeVhdlProcesshasToBeVhdlProcess = extraVars or hasToBeVhdlProcessif hasToBeVhdlProcess and not _hasToBeVhdlProcess:oneIndent = getIndent()statemets = list(map(lambda x: oneIndent + x, statemets))return cls.processTmpl.render(indent=getIndent(ctx.indent),name=proc.name,hasToBeVhdlProcess=hasToBeVhdlProcess,extraVars=extraVarsSerialized,sensitivityList=\"\".join(sensitivityList),statements=extraVarsInit + statemets)", "docstring": "Serialize HWProcess objects as VHDL\n\n:param scope: name scope to prevent name collisions", "id": "f1426:c1:m1"} {"signature": "def logChange(self, nowTime, sig, nextVal):", "body": "try:hwProc = self.registered[sig]except KeyError:returnif hwProc.actualTime < nowTime:a = hwProc.actualTimeif a < :a = delay = int(nowTime - a)if delay > :hwProc.statements.append(WaitStm(int(delay) // ))hwProc.actualTime = nowTimetry:nextVal._dtype.forceVector = hwProc.driverFor._dtype.forceVectorexcept AttributeError:passhwProc.statements.extend(connect(nextVal, hwProc.driverFor))", "docstring": "This method is called for every value change of any signal.", "id": "f1434:c0:m2"} {"signature": "def logApplyingValues(self, simulator, values):", "body": "", "docstring": "Log simulator value quantum applied", "id": "f1435:c0:m4"} {"signature": "@internaldef mkArrayUpdater(nextItemVal: Value, indexes: Tuple[Value],invalidate: bool):", "body": "def updater(currentVal):if len(indexes) > :raise NotImplementedError(\"\")_nextItemVal = nextItemVal.clone()if invalidate:_nextItemVal.vldMask = index = indexes[]change = valueHasChanged(currentVal._getitem__val(index), _nextItemVal)currentVal._setitem__val(index, _nextItemVal)return (change, currentVal)return updater", "docstring": "Create value updater for simulation for value of array type\n\n:param nextVal: instance of Value which will be asssiggned to signal\n:param indexes: tuple on indexes where value should be updated\n in target array\n\n:return: function(value) -> tuple(valueHasChangedFlag, nextVal)", "id": "f1436:m4"} {"signature": "@internaldef sensitivity(proc: HWProcess, *sensitiveTo):", "body": "for s in sensitiveTo:if isinstance(s, tuple):sen, s = sif sen == SENSITIVITY.ANY:s.simSensProcs.add(proc)elif sen == SENSITIVITY.RISING:s.simRisingSensProcs.add(proc)elif sen == SENSITIVITY.FALLING:s.simFallingSensProcs.add(proc)else:raise AssertionError(sen)else:s.simSensProcs.add(proc)", "docstring": "register sensitivity for 
process", "id": "f1436:m0"} {"signature": "def assertValSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):", "body": "seq1 = allValuesToInts(seq1)self.assertSequenceEqual(seq1, seq2, msg, seq_type)", "docstring": "An equality assertion for ordered sequences (like lists and tuples).\nFor the purposes of this function, a valid ordered sequence type is one\nwhich can be indexed, has a length, and has an equality operator.\n\nArgs:\n\n:param seq1: can contain instance of values or nested list of them\n:param seq2: items are not converted\n:param seq_type: The expected datatype of the sequences, or None if no\n datatype should be enforced.\n:param msg: Optional message to use on failure instead of a list of\n differences.", "id": "f1437:c0:m6"} {"signature": "def registerWriteCallback(self, callback, getEnFn) -> int:", "body": "index = len(self._writeCallbacks)self._writeCallbacks.append(None)self._writeCallbacksToEn.append((index, callback, getEnFn))return index", "docstring": "Register writeCallback for signal.\nRegistration is evaluated at the end of deltastep of simulator.\n\n:param callback: simulation process represented by function(simulator)\n which should be called after update of this signal\n:param getEnFn: function() to get initial value for enable of callback", "id": "f1443:c0:m1"} {"signature": "def simUpdateVal(self, simulator, valUpdater):", "body": "dirtyFlag, newVal = valUpdater(self._oldVal)if dirtyFlag:self._val = newValnewVal.updateTime = simulator.nowlog = simulator.config.logChangeif log:log(simulator.now, self, newVal)self.simPropagateChanges(simulator)", "docstring": "Method called by simulator to update new value for this object", "id": "f1443:c0:m4"} {"signature": "@internaldef _runSeqProcesses(self) -> Generator[None, None, None]:", "body": "updates = []for proc in self._seqProcsToRun:try:outContainer = self._outputContainers[proc]except KeyError:outContainer = Noneproc(self, outContainer)if outContainer is not None:updates.append(outContainer)self._seqProcsToRun = UniqList()self._runSeqProcessesPlaned = Falsefor cont in updates:for sigName, sig in cont._all_signals:newVal = getattr(cont, sigName)if newVal is not None:v = self._conflictResolveStrategy(newVal)updater, _ = vsig.simUpdateVal(self, updater)setattr(cont, sigName, None)returnyield", "docstring": "Delta step for event dependent processes", "id": "f1445:c6:m10"} {"signature": "def __init__(self, dstSignalsTuples):", "body": "self._all_signals = []for name, s in dstSignalsTuples:setattr(self, name, None)self._all_signals.append([name, s])", "docstring": ":param dstSignalsTuples: tuples (name, signal)", "id": "f1445:c0:m0"} {"signature": "def add_process(self, proc) -> None:", "body": "self._events.push(self.now, PRIORITY_NORMAL, proc)", "docstring": "Add process to events with default priority on current time", "id": "f1445:c6:m15"} {"signature": "def waitOnCombUpdate(self) -> Event:", "body": "if not self._combUpdateDonePlaned:return self._scheduleCombUpdateDoneEv()else:return self.combUpdateDoneEv", "docstring": "Sim processes can wait on combUpdateDone by:\nyield sim.waitOnCombUpdate()\n\nSim process is then woken up when all combinational updates\nare done in this delta step", "id": "f1445:c6:m2"} {"signature": "@internaldef _applyValues(self) -> Generator[None, None, None]:", "body": "va = self._valuesToApplyself._applyValPlaned = Falselav = self.config.logApplyingValuesif va and lav:lav(self, va)self._valuesToApply = []addSp = self._seqProcsToRun.appendfor s, vUpdater, isEventDependent, comesFrom 
in va:if isEventDependent:addSp(comesFrom)else:s.simUpdateVal(self, vUpdater)self._runCombProcesses()if self._valuesToApply and not self._applyValPlaned:self._scheduleApplyValues()returnyield", "docstring": "Perform delta step by writing stacked values to signals", "id": "f1445:c6:m11"} {"signature": "@internaldef raise_StopSimulation(sim):", "body": "raise StopSimumulation()returnyield", "docstring": "Simulation process used to stop simulation", "id": "f1445:m1"} {"signature": "@internaldef _scheduleApplyValues(self) -> None:", "body": "assert not self._applyValPlaned, self.nowself._add_process(self._applyValues(), PRIORITY_APPLY_COMB)self._applyValPlaned = Trueif self._runSeqProcessesPlaned:returnassert not self._seqProcsToRun and not self._runSeqProcessesPlaned, self.nowself._add_process(self._runSeqProcesses(), PRIORITY_APPLY_SEQ)self._runSeqProcessesPlaned = True", "docstring": "Apply stashed values to signals", "id": "f1445:c6:m7"} {"signature": "def getDrivers(self):", "body": "return [self.driver]", "docstring": "Called before simulation to collect all drivers of interfaces\nfrom this agent", "id": "f1446:c0:m4"} {"signature": "@internaldef reconnectUnitSignalsToModel(synthesisedUnitOrIntf, modelCls):", "body": "obj = synthesisedUnitOrIntfsubInterfaces = obj._interfacesif subInterfaces:for intf in subInterfaces:reconnectUnitSignalsToModel(intf, modelCls)else:s = synthesisedUnitOrIntfs._sigInside = getattr(modelCls, s._sigInside.name)", "docstring": "Reconnect model signals to unit to run simulation with simulation model\nbut use original unit interfaces for communication\n\n:param synthesisedUnitOrIntf: interface where should be signals\n replaced from signals from modelCls\n:param modelCls: simulation model form where signals\n for synthesisedUnitOrIntf should be taken", "id": "f1449:m2"} {"signature": "def __init__(self, sig: SimSignal, fn, shouldBeEnabledFn):", "body": "assert not isinstance(fn, CallbackLoop)self.fn = fnself.isGenerator = inspect.isgeneratorfunction(fn)self.shouldBeEnabledFn = shouldBeEnabledFnself._callbackIndex = Nonetry:self.sig = sig._sigInsideexcept AttributeError:self.sig = sig", "docstring": ":param sig: signal on which write callback should be used\n:attention: if condFn is None callback function is always executed\n\n:ivra fn: function/generator which is callback which should be executed\n:ivar isGenerator: flag if callback function is generator\n or normal function\n:ivar _callbackIndex: index of callback in write callbacks on sig,\n if is None callback was not registered yet\n:ivar shouldBeEnabledFn: function() -> bool, which returns True if this\n callback loop should be enabled", "id": "f1449:c0:m0"} {"signature": "@internaldef _simUnitVcd(simModel, stimulFunctions, outputFile, until):", "body": "sim = HdlSimulator()sim.config = VcdHdlSimConfig(outputFile)sim.simUnit(simModel, until=until, extraProcesses=stimulFunctions)return sim", "docstring": ":param unit: interface level unit to simulate\n:param stimulFunctions: iterable of function(env)\n (simpy environment) which are driving the simulation\n:param outputFile: file where vcd will be dumped\n:param time: endtime of simulation, time units are defined in HdlSimulator\n:return: hdl simulator object", "id": "f1449:m4"} {"signature": "def selectBit(val, bitNo):", "body": "return (val >> bitNo) & ", "docstring": "select bit from integer", "id": "f1450:m2"} {"signature": "def Default(self, *statements):", "body": "assert self.parentStm is Noneself.rank += self.default = 
[]self._register_stements(statements, self.default)return self", "docstring": "c-like default of switch statement", "id": "f1451:c1:m4"} {"signature": "def rol(sig, howMany) -> RtlSignalBase:", "body": "width = sig._dtype.bit_length()return sig[(width - howMany):]._concat(sig[:(width - howMany)])", "docstring": "Rotate left", "id": "f1451:m6"} {"signature": "def connect(src, *destinations, exclude: set=None, fit=False):", "body": "assignemnts = []if isinstance(src, HObjList):for dst in destinations:assert len(src) == len(dst), (src, dst)_destinations = [iter(d) for d in destinations]for _src in src:dsts = [next(d) for d in _destinations]assignemnts.append(connect(_src, *dsts, exclude=exclude, fit=fit))else:for dst in destinations:assignemnts.append(_connect(src, dst, exclude, fit))return assignemnts", "docstring": "Connect src (signals/interfaces/values) to all destinations\n\n:param exclude: interfaces on any level on src or destinations\n which should be excluded from connection process\n:param fit: auto fit source width to destination width", "id": "f1451:m3"} {"signature": "def addCases(self, tupesValStmnts):", "body": "s = selffor val, statements in tupesValStmnts:s = s.Case(val, statements)return s", "docstring": "Add multiple case statements from iterable of tuples\n(caseVal, statements)", "id": "f1451:c1:m2"} {"signature": "def __init__(self, parent, stateT, stateRegName=\"\"):", "body": "if isinstance(stateT, HEnum):beginVal = stateT.fromPy(stateT._allValues[])else:beginVal = self.stateReg = parent._reg(stateRegName, stateT, beginVal)Switch.__init__(self, self.stateReg)", "docstring": ":param parent: parent unit where fsm should be built\n:param stateT: enum type of state\n:param stateRegName: name of register where state is stored", "id": "f1451:c2:m0"} {"signature": "def sll(sig, howMany) -> RtlSignalBase:", "body": "width = sig._dtype.bit_length()return sig[(width - howMany):]._concat(vec(, howMany))", "docstring": "Logical shift left", "id": "f1451:m7"} {"signature": "def log2ceil(x):", "body": "if not isinstance(x, (int, float)):x = int(x)if x == or x == :res = else:res = math.ceil(math.log2(x))return hInt(res)", "docstring": "Returns no of bits required to store x-1\nfor example x=8 returns 3", "id": "f1451:m9"} {"signature": "@propertydef texts(self):", "body": "return [subpod.plaintext for subpod in self.subpod]", "docstring": "The text from each subpod in this pod as a list.", "id": "f1458:c7:m2"} {"signature": "def gen_keywords(*args: Union[ANSIColors, ANSIStyles], **kwargs: Union[ANSIColors, ANSIStyles]) -> tuple:", "body": "fields: tuple = tuple()values: tuple = tuple()for tpl in args:fields += tpl._fieldsvalues += tplfor prefix, tpl in kwargs.items():fields += tuple(map(lambda x: ''.join([prefix, x]), tpl._fields))values += tplreturn namedtuple('', fields)(*values)", "docstring": "generate single escape sequence mapping.", "id": "f1503:m0"} {"signature": "def fold_map(self, fa: F[A], z: B, f: Callable[[A], B], g: Callable[[Z, B], Z]=operator.add) -> Z:", "body": "mapped = Functor.fatal(type(fa)).map(fa, f)return self.fold_left(mapped)(z)(g)", "docstring": "map `f` over the traversable, then fold over the result\n using the supplied initial element `z` and operation `g`,\n defaulting to addition for the latter.", "id": "f1547:c1:m7"} {"signature": "@abc.abstractmethoddef ap(self, fa: F, f: F) -> F:", "body": "...", "docstring": "f should be an F[Callable[[A], B]]", "id": "f1556:c0:m0"} {"signature": "def set_log_level(log_level):", "body": "if not 
LOGBOOK_INSTALLED:returnlogbook.get_level_name(log_level)if log_level == logger.level:returnif log_level == logbook.NOTSET:set_logger(is_enable=False)else:set_logger(is_enable=True)logger.level = log_leveltry:import pytablewriterpytablewriter.set_log_level(log_level)except ImportError:passtabledata.set_log_level(log_level)", "docstring": "Set logging level of this module. Using\n`logbook `__ module for logging.\n\n:param int log_level:\n One of the log level of\n `logbook `__.\n Disabled logging if ``log_level`` is ``logbook.NOTSET``.\n:raises LookupError: If ``log_level`` is an invalid value.", "id": "f1604:m1"} {"signature": "@register.filterdef user_can_edit(obj, user):", "body": "return obj.user_can_edit(user) if hasattr(obj, \"\") else None", "docstring": "If a model implements the user_can_edit method,\nthis filter returns the result of the method.", "id": "f1609:m12"} {"signature": "@register.simple_tagdef jsfile(url):", "body": "if not url.startswith('') and not url[:] == '':url = settings.STATIC_URL + urlreturn ''.format(src=url)", "docstring": "Output a script tag to a js file.", "id": "f1609:m2"} {"signature": "@register.simple_tagdef table(rows):", "body": "output = ''for row in rows:output += ''for column in row:output += ''.format(s=column)output += ''output += ''return output", "docstring": "Output a simple table with several columns.", "id": "f1609:m0"} {"signature": "@register.filterdef model_verbose(obj, capitalize=True):", "body": "if isinstance(obj, ModelForm):name = obj._meta.model._meta.verbose_nameelif isinstance(obj, Model):name = obj._meta.verbose_nameelse:raise Exception('' + type(obj))return name.capitalize() if capitalize else name", "docstring": "Return the verbose name of a model.\nThe obj argument can be either a Model instance, or a ModelForm instance.\nThis allows to retrieve the verbose name of the model of a ModelForm\neasily, without adding extra context vars.", "id": "f1609:m11"} {"signature": "def pre_save(self, instance):", "body": "pass", "docstring": "Hook for editing the instance.", "id": "f1611:c11:m3"} {"signature": "def post_delete(self, object):", "body": "", "docstring": "Hook for alterations after delete.", "id": "f1611:c3:m3"} {"signature": "def get_config(key, default=None):", "body": "return getattr(settings, key, default)", "docstring": "Get settings from django.conf if exists,\nreturn default value otherwise\n\nexample:\n\nADMIN_EMAIL = get_config('ADMIN_EMAIL', 'default@email.com')", "id": "f1613:m1"} {"signature": "def resolve_class(class_path):", "body": "modulepath, classname = class_path.rsplit('', )module = __import__(modulepath, fromlist=[classname])return getattr(module, classname)", "docstring": "Load a class by a fully qualified class_path,\neg. 
myapp.models.ModelName", "id": "f1613:m4"} {"signature": "def get_object_or_none(qs, *args, **kwargs):", "body": "try:return qs.get(*args, **kwargs)except models.ObjectDoesNotExist:return None", "docstring": "Try to retrieve a model, and return None if\nit is not found.\nUseful if you do not want to bother with the try/except block.", "id": "f1617:m0"} {"signature": "@cli.command('')@click.argument('', type=str)@click.pass_contextdef scrape(ctx, url):", "body": "data = load_feed(url)feed = data['']entries = data['']_type = ''country = ''for entry in entries:_id = sluggify(entry[''])city = entry[''][]['']landing = entry['']start_time = dt_normalize(entry[''], local_tz=True)title = entry['']summary = entry['']link = entry['']ipdb.set_trace()", "docstring": "Rip the events from a given rss feed, normalize the data and store.", "id": "f1622:m2"} {"signature": "def get_token(client_id, client_secret, client_access_token, page=None):", "body": "payload = {'': '','': client_id,'': client_secret,}if client_access_token:payload[''] = ''payload[''] = client_access_tokenresponse = requests.post('',params=payload)access_token = response.json()['']return access_token", "docstring": "See: http://nodotcom.org/python-facebook-tutorial.html", "id": "f1625:m0"} {"signature": "def _create_msg(self, to, subject, msgHtml, msgPlain, attachments=None):", "body": "sender = self.senderif attachments and isinstance(attachments, str):attachments = [attachments]else:attachments = list(attachments or [])msg = MIMEMultipart('')msg[''] = subjectmsg[''] = sendermsg[''] = tomsg.attach(MIMEText(msgPlain, ''))msg.attach(MIMEText(msgHtml, ''))for path in attachments:_attachment = self._prep_attachment(path)msg.attach(_attachment)raw = base64.urlsafe_b64encode(msg.as_bytes()).decode()body = {'': raw}return body", "docstring": "attachments should be a list of paths", "id": "f1630:c0:m5"} {"signature": "def emit(self, record):", "body": "if getattr(this, '', {}).get(LOGS_NAME, False):self.format(record)this.send({'': ADDED,'': LOGS_NAME,'': meteor_random_id('' % LOGS_NAME),'': {attr: {'': lambda args: [repr(arg) for arg in args],'': datetime.datetime.fromtimestamp,'': stacklines_or_none,}.get(attr,lambda val: val )(getattr(record, attr, None))for attr in ('','','','','','','','','','','','','','','','','','','',)},})", "docstring": "Emit a formatted log record via DDP.", "id": "f1635:c0:m0"} {"signature": "def database_backwards(self, app_label, schema_editor, from_state, to_state):", "body": "self.truncate(app_label, schema_editor, self.truncate_backwards)", "docstring": "Use schema_editor to apply any reverse changes.", "id": "f1642:c0:m4"} {"signature": "def truncate(self, app_label, schema_editor, models):", "body": "for model_name in models:model = '' % (app_label, model_name)schema_editor.execute('' % (model.lower(),),)", "docstring": "Truncate tables.", "id": "f1642:c0:m1"} {"signature": "def ddpp_sockjs_info(environ, start_response):", "body": "import randomimport ejsonstart_response('',[('', ''),] + common_headers(environ),)yield ejson.dumps(collections.OrderedDict([('', True),('', ['',]),('', False),('', random.getrandbits()),]))", "docstring": "Inform client that WebSocket service is available.", "id": "f1649:m2"} {"signature": "def run(self):", "body": "self.logger.debug('')self.start()self._stop_event.wait()gevent.joinall(self.threads + [DDPLauncher.pgworker])self.threads = []", "docstring": "Run DDP greenlets.", "id": "f1649:c0:m7"} {"signature": "def add_web_servers(self, listen_addrs, debug=False, **ssl_args):", 
"body": "self.servers.extend(self.get_web_server(listen_addr, debug=debug, **ssl_args)for listen_addr in listen_addrs)", "docstring": "Add WebSocketServer for each (host, port) in listen_addrs.", "id": "f1649:c0:m2"} {"signature": "def get_auth_hash(user, purpose):", "body": "return iter_auth_hashes(user, purpose, minutes_valid=).next()", "docstring": "Generate a user hash for a particular purpose.", "id": "f1651:m1"} {"signature": "def do_login(self, user):", "body": "this.user_id = user.pkthis.user_ddp_id = get_meteor_id(user)this.user_sub_id = meteor_random_id()API.do_sub(this.user_sub_id, '', silent=True)self.update_subs(user.pk)user_logged_in.send(sender=user.__class__, request=this.request, user=user,)", "docstring": "Login a user.", "id": "f1651:c4:m9"} {"signature": "@api_endpointdef update(self, selector, update, options=None):", "body": "del optionsuser = get_object(self.model, selector[''],pk=this.user_id,)profile_update = self.deserialize_profile(update[''], key_prefix='', pop=True,)if len(update['']) != :raise MeteorError(, '')for key, val in profile_update.items():setattr(user, key, val)user.save()", "docstring": "Update user data.", "id": "f1651:c1:m2"} {"signature": "@api_endpoint('')def forgot_password(self, params):", "body": "username = self.get_username(params)try:user = self.user_model.objects.get(**{self.user_model.USERNAME_FIELD: username,})except self.user_model.DoesNotExist:self.auth_failed()minutes_valid = HASH_MINUTES_VALID[HashPurpose.PASSWORD_RESET]token = get_user_token(user=user, purpose=HashPurpose.PASSWORD_RESET,minutes_valid=minutes_valid,)forgot_password.send(sender=__name__,user=user,token=token,request=this.request,expiry_date=calc_expiry_time(minutes_valid),)", "docstring": "Request password reset email.", "id": "f1651:c4:m16"} {"signature": "def iter_auth_hashes(user, purpose, minutes_valid):", "body": "now = timezone.now().replace(microsecond=, second=)for minute in range(minutes_valid + ):yield hashlib.sha1('' % (now - datetime.timedelta(minutes=minute),user.password,purpose,user.pk,settings.SECRET_KEY,),).hexdigest()", "docstring": "Generate auth tokens tied to user and specified purpose.\n\nThe hash expires at midnight on the minute of now + minutes_valid, such\nthat when minutes_valid=1 you get *at least* 1 minute to use the token.", "id": "f1651:m0"} {"signature": "def ready(self):", "body": "THREAD_LOCAL_FACTORIES[''] = self.user_factory", "docstring": "Called after AppConfig.ready().", "id": "f1651:c4:m1"} {"signature": "@staticmethoddef check_secure():", "body": "if this.request.is_secure():return True elif this.request.META[''] in ['','',]:return True raise MeteorError(, '')", "docstring": "Check request, return False if using SSL or local connection.", "id": "f1651:c4:m5"} {"signature": "def set_seed(self, val):", "body": "self._streams = {}self._seed = val", "docstring": "Set current PRNG seed.", "id": "f1653:c3:m2"} {"signature": "def __init__(self):", "body": "if self._init_done:raise SystemError('')self._init_done = True", "docstring": "Create new thread storage instance.", "id": "f1653:c2:m0"} {"signature": "def __init__(self):", "body": "self.n = ", "docstring": "Initialise state.", "id": "f1654:c0:m0"} {"signature": "def __init__(self, *args):", "body": "self.seed(args)", "docstring": "Initialise Alea state from seeds (args).", "id": "f1654:c1:m0"} {"signature": "def get_meteor_ids(model, object_ids):", "body": "meta = model._metaresult = collections.OrderedDict((str(obj_pk), None)for obj_pkin object_ids)if isinstance(meta.pk, 
AleaIdField):return collections.OrderedDict((obj_pk, obj_pk) for obj_pk in object_ids)alea_unique_fields = [fieldfor field in meta.local_fieldsif isinstance(field, AleaIdField) and field.unique and not field.null]if len(alea_unique_fields) == :aid = alea_unique_fields[].namequery = model.objects.filter(pk__in=object_ids,).values_list('', aid)else:content_type = ContentType.objects.get_for_model(model)query = ObjectMapping.objects.filter(content_type=content_type,object_id__in=list(result)).values_list('', '')for obj_pk, meteor_id in query:result[str(obj_pk)] = meteor_idfor obj_pk, meteor_id in result.items():if meteor_id is None:result[obj_pk] = get_meteor_id(model, obj_pk)return result", "docstring": "Return Alea ID mapping for all given ids of specified model.", "id": "f1655:m1"} {"signature": "def get_object_id(model, meteor_id):", "body": "if meteor_id is None:return Nonemeta = model._metaif model is ObjectMapping:raise TypeError(\"\")if isinstance(meta.pk, AleaIdField):return meteor_idalea_unique_fields = [fieldfor field in meta.local_fieldsif isinstance(field, AleaIdField) and field.unique]if len(alea_unique_fields) == :val = model.objects.values_list('', flat=True,).get(**{alea_unique_fields[].attname: meteor_id,})if val:return valcontent_type = ContentType.objects.get_for_model(model)return ObjectMapping.objects.filter(content_type=content_type,meteor_id=meteor_id,).values_list('', flat=True).get()", "docstring": "Return an object ID for the given meteor_id.", "id": "f1655:m2"} {"signature": "def set_params(self, vals):", "body": "self.params_ejson = ejson.dumps(vals or {})", "docstring": "Set params dict.", "id": "f1655:c4:m2"} {"signature": "def __str__(self):", "body": "return '' % (self.meteor_id, self.content_type, self.object_id,)", "docstring": "Text representation of a mapping.", "id": "f1655:c2:m0"} {"signature": "def get_params(self):", "body": "return ejson.loads(self.params_ejson or '')", "docstring": "Get params dict.", "id": "f1655:c4:m1"} {"signature": "def get_object_ids(model, meteor_ids):", "body": "if model is ObjectMapping:raise TypeError(\"\")meta = model._metaalea_unique_fields = [fieldfor field in meta.local_fieldsif isinstance(field, AleaIdField) and field.unique and not field.null]result = collections.OrderedDict((str(meteor_id), None)for meteor_idin meteor_ids)if len(alea_unique_fields) == :aid = alea_unique_fields[].namequery = model.objects.filter(**{'' % aid: meteor_ids,}).values_list(aid, '')else:content_type = ContentType.objects.get_for_model(model)query = ObjectMapping.objects.filter(content_type=content_type,meteor_id__in=meteor_ids,).values_list('', '')for meteor_id, object_id in query:result[meteor_id] = object_idreturn result", "docstring": "Return all object IDs for the given meteor_ids.", "id": "f1655:m3"} {"signature": "def url(self, path):", "body": "return urljoin('' % (self.scheme, self.server_addr, self.server_port),path,)", "docstring": "Return full URL for given path.", "id": "f1656:c2:m4"} {"signature": "def next_id(self):", "body": "self.call_seq += return self.call_seq", "docstring": "Return next `id` from sequence.", "id": "f1656:c0:m8"} {"signature": "def close(self):", "body": "self.websocket.close()", "docstring": "Close the connection.", "id": "f1656:c0:m4"} {"signature": "def unsub(self, sub_id):", "body": "self.send(msg='', id=sub_id)", "docstring": "Unsubscribe from a publication by sub_id.", "id": "f1656:c0:m13"} {"signature": "def send(self, **msg):", "body": "self.websocket.send(ejson.dumps([ejson.dumps(msg)]))", 
"docstring": "Send a SockJS wrapped msg.", "id": "f1656:c1:m0"} {"signature": "def ping(self, id_=None):", "body": "if id_:self.send(msg='', id=id_)else:self.send(msg='')", "docstring": "Ping with optional id.", "id": "f1656:c0:m10"} {"signature": "def tearDown(self):", "body": "if self._server is not None:self._server.stop()self._server = Nonefrom django.db import connection, close_old_connectionsclose_old_connections()cur = connection.cursor()cur.execute('')cur.fetchall()cur.close()connection.close()gevent.sleep()super(DDPServerTestCase, self).tearDown()", "docstring": "Stop the DDP server, and reliably close any open DB transactions.", "id": "f1656:c3:m1"} {"signature": "def sockjs(self, url, *args, **kwargs):", "body": "return SockJSClient(self.url(url).replace('', ''), *args, **kwargs)", "docstring": "Return a SockJSClient for the given URL.", "id": "f1656:c2:m3"} {"signature": "def on_m2m_changed(self, sender, **kwargs):", "body": "if self._in_migration:returnif kwargs[''] is False:objs = [kwargs['']]model = objs[].__class__else:model = kwargs['']objs = model.objects.filter(pk__in=kwargs[''])mod_name = model_name(model)if mod_name.split('', )[] in ('', ''):return if kwargs[''] in ('','','',):for obj in objs:self.on_pre_change(sender=model, instance=obj, using=kwargs[''],)elif kwargs[''] in ('','','',):for obj in objs:self.send_notify(model=model,obj=obj,msg=CHANGED,using=kwargs[''],)", "docstring": "M2M-changed signal handler.", "id": "f1657:c6:m18"} {"signature": "def api_endpoints(obj):", "body": "for name in dir(obj):attr = getattr(obj, name)api_path = getattr(attr, '', None)if api_path:yield ('' % (obj.api_path_prefix, api_path),attr,)for api_provider in obj.api_providers:for api_path, attr in api_endpoints(api_provider):yield (api_path, attr)", "docstring": "Iterator over all API endpoint names and callbacks.", "id": "f1657:m1"} {"signature": "def ready(self):", "body": "pass", "docstring": "Initialisation (setup lookups and signal handlers).", "id": "f1657:c1:m3"} {"signature": "def objects_for_user(self, user, qs=None, xmin__lte=None):", "body": "qs = self.get_queryset(qs)user_rels = self.user_relif user_rels:if user is None:return qs.none() if isinstance(user_rels, basestring):user_rels = [user_rels]user_filter = Nonemeta = self.model._metafor user_rel in user_rels:name, rel = (user_rel.split('', ) + [None])[:]field = meta.pk if name == '' else meta.get_field(name)if field.related_model is not None:filter_obj = Q(**{'' % name: field.related_model.objects.filter(**{rel or '': user}).values(''),})else:filter_obj = Q(**{str(user_rel): user})if user_filter is None:user_filter = filter_objelse:user_filter |= filter_objqs = qs.filter(user_filter).distinct()if xmin__lte is not None:qs = qs.extra(where=[\"\"],params=[xmin__lte],)return qs", "docstring": "Find objects in queryset related to specified user.", "id": "f1657:c3:m2"} {"signature": "def __new__(mcs, name, bases, attrs):", "body": "attrs.update(api_path_prefix_format=COLLECTION_PATH_FORMAT,)model = attrs.get('', None)if attrs.get('', None) is None and model is not None:attrs.update(name=model_name(model),)return super(CollectionMeta, mcs).__new__(mcs, name, bases, attrs)", "docstring": "Create a new Collection class.", "id": "f1657:c2:m0"} {"signature": "def on_post_migrate(self, sender, **kwargs):", "body": "self._in_migration = Falsetry:Connection.objects.all().delete()except DatabaseError: pass", "docstring": "Post-migrate signal handler.", "id": "f1657:c6:m16"} {"signature": "def clear_api_path_map_cache(self):", "body": 
"self._api_path_cache = Nonefor api_provider in self.api_providers:if six.get_method_self(api_provider.clear_api_path_map_cache,) is not None:api_provider.clear_api_path_map_cache()", "docstring": "Clear out cache for api_path_map.", "id": "f1657:c1:m1"} {"signature": "@api_endpointdef collections(self, *params):", "body": "return sorted(set(hasattr(qs, '') and model_name(qs.model) or qs[]for qsin self.get_queries(False, *params)))", "docstring": "Return list of collections for this publication.", "id": "f1657:c5:m1"} {"signature": "def model_name(model):", "body": "return force_text(model._meta)", "docstring": "Return model name given model class.", "id": "f1657:m2"} {"signature": "def do_unsub(self, id_, silent):", "body": "sub = Subscription.objects.get(connection=this.ws.connection, sub_id=id_,)for col, qs in self.sub_unique_objects(sub):if isinstance(col.model._meta.pk, AleaIdField):meteor_ids = Noneelse:meteor_ids = get_meteor_ids(qs.model, qs.values_list('', flat=True),)for obj in qs:payload = col.obj_change_as_msg(obj, REMOVED, meteor_ids)this.send(payload)this.subs[sub.publication].remove(sub.pk)sub.delete()if not silent:this.send({'': '', '': id_})", "docstring": "Unsubscribe the current thread from the specified subscription id.", "id": "f1657:c6:m10"} {"signature": "@api_endpoint(decorate=False)def method(self, method, params, id_):", "body": "try:handler = self.api_path_map()[method]except KeyError:raise MeteorError(, '', method)try:inspect.getcallargs(handler, *params)except TypeError as err:raise MeteorError(, '' % err)result = handler(*params)msg = {'': '', '': id_}if result is not None:msg[''] = resultthis.send(msg)", "docstring": "Invoke a method.", "id": "f1657:c6:m11"} {"signature": "def valid_subscribers(self, model, obj, using):", "body": "col_user_ids = {}col_connection_ids = collections.defaultdict(set)for sub in Subscription.objects.filter(collections__model_name=model_name(model),).prefetch_related(''):pub = self.get_pub_by_name(sub.publication)try:queries = list(pub.user_queries(sub.user, *sub.params))except Exception:queries = []for qs, col in (self.qs_and_collection(qs)for qsin queries):if qs.model is not model:continue if not qs.filter(pk=obj.pk).exists():continue try:user_ids = col_user_ids[col.__class__]except KeyError:user_ids = col_user_ids[col.__class__] =col.user_ids_for_object(obj)if user_ids is None:pass elif sub.user_id not in user_ids:continue col_connection_ids[col].add(sub.connection_id)return col_connection_ids", "docstring": "Calculate valid subscribers (connections) for obj.", "id": "f1657:c6:m21"} {"signature": "def on_post_save(self, sender, **kwargs):", "body": "if self._in_migration:returnself.send_notify(model=sender,obj=kwargs[''],msg=kwargs[''] and ADDED or CHANGED,using=kwargs[''],)", "docstring": "Post-save signal handler.", "id": "f1657:c6:m19"} {"signature": "def __init__(self):", "body": "self._registry = {}self._ddp_subscribers = {}", "docstring": "DDP API init.", "id": "f1657:c6:m0"} {"signature": "def __new__(cls, name, bases, attrs):", "body": "attrs[''] = attrs.pop('', None) or namename_format = attrs.get('', None)if name_format:attrs[''] = name_format.format(**attrs)api_path_prefix_format = attrs.get('', None)if attrs.get('', None) is not None:passelif api_path_prefix_format is not None:attrs[''] = api_path_prefix_format.format(**attrs)return super(APIMeta, cls).__new__(cls, name, bases, attrs)", "docstring": "Create a new APIMixin class.", "id": "f1657:c0:m0"} {"signature": "def recv_unsub(self, id_=None):", "body": "if 
id_:self.api.unsub(id_)else:self.reply('')", "docstring": "DDP unsub handler.", "id": "f1658:c0:m13"} {"signature": "def get_tx_id(self):", "body": "return next(self._tx_buffer_id_gen)", "docstring": "Get the next TX msg ID.", "id": "f1658:c0:m0"} {"signature": "def safe_call(func, *args, **kwargs):", "body": "try:return None, func(*args, **kwargs)except Exception: return traceback.format_exc(), None", "docstring": "Call `func(*args, **kwargs)` but NEVER raise an exception.\n\nUseful in situations such as inside exception handlers where calls to\n`logging.error` try to send email, but the SMTP server isn't always\navailable and you don't want your exception handler blowing up.", "id": "f1658:m0"} {"signature": "def __str__(self):", "body": "return self.remote_addr", "docstring": "Show remote address that connected to us.", "id": "f1658:c0:m2"} {"signature": "def on_close(self, *args, **kwargs):", "body": "if self.connection is not None:del self.pgworker.connections[self.connection.pk]self.connection.delete()self.connection = Nonesignals.request_finished.send(sender=self.__class__)safe_call(self.logger.info, '', self, args or '')", "docstring": "Handle closing of websocket connection.", "id": "f1658:c0:m3"} {"signature": "def validate_kwargs(func, kwargs):", "body": "func_name = func.__name__argspec = inspect.getargspec(func)all_args = argspec.args[:]defaults = list(argspec.defaults or [])if inspect.ismethod(func) and all_args[:] == ['']:all_args[:] = []if defaults:required = all_args[:-len(defaults)]else:required = all_args[:]trans = {arg: arg.endswith('') and arg[:-] or argfor argin all_args}for key in list(kwargs):key_adj = '' % keyif key_adj in all_args:kwargs[key_adj] = kwargs.pop(key)supplied = sorted(kwargs)missing = [trans.get(arg, arg) for arg in requiredif arg not in supplied]if missing:raise MeteorError(,func.err,'' % (func_name,''.join(missing),),)extra = [arg for arg in suppliedif arg not in all_args]if extra:raise MeteorError(,func.err,'' % (func_name, ''.join(extra)),)", "docstring": "Validate arguments to be supplied to func.", "id": "f1658:m2"} {"signature": "def recv_ping(self, id_=None):", "body": "if id_ is None:self.reply('')else:self.reply('', id=id_)", "docstring": "DDP ping handler.", "id": "f1658:c0:m11"} {"signature": "def recv_method(self, method, params, id_, randomSeed=None):", "body": "if randomSeed is not None:this.random_streams.random_seed = randomSeedthis.alea_random = alea.Alea(randomSeed)self.api.method(method, params, id_)self.reply('', methods=[id_])", "docstring": "DDP method handler.", "id": "f1658:c0:m14"} {"signature": "def send(self, data, tx_id=None):", "body": "if tx_id is None:tx_id = self.get_tx_id()self._tx_buffer[tx_id] = datawhile self._tx_next_id in self._tx_buffer:data = self._tx_buffer.pop(self._tx_next_id)if self._tx_buffer:safe_call(self.logger.debug, '', self._tx_next_id)self._tx_next_id = next(self._tx_next_id_gen)if not isinstance(data, basestring):msg = data.get('', None)if msg in (ADDED, CHANGED, REMOVED):ids = self.remote_ids[data['']]meteor_id = data['']if msg == ADDED:if meteor_id in ids:msg = data[''] = CHANGEDelse:ids.add(meteor_id)elif msg == CHANGED:if meteor_id not in ids:msg = data[''] = ADDEDids.add(meteor_id)elif msg == REMOVED:try:ids.remove(meteor_id)except KeyError:continue data = '' % ejson.dumps([ejson.dumps(data)])safe_call(self.logger.debug, '', self, data)try:self.ws.send(data)except geventwebsocket.WebSocketError:self.ws.close()self._tx_buffer.clear()breaknum_waiting = len(self._tx_buffer)if num_waiting > 
:safe_call(self.logger.warn,'',tx_id, self._tx_next_id, num_waiting, self._tx_buffer,)", "docstring": "Send `data` (raw string or EJSON payload) to WebSocket client.", "id": "f1658:c0:m8"} {"signature": "@staticmethoddef path_to_dir(*path_args):", "body": "return os.path.join(*list(path_args[:-]) + path_args[-].split(posixpath.sep))", "docstring": "Convert a UNIX-style path into platform specific directory spec.", "id": "f1669:c0:m5"} {"signature": "def finalize_options(self):", "body": "self.set_undefined_options('',('', ''),)self.set_undefined_options('',('', ''),)setuptools.command.build_py.build_py.finalize_options(self)", "docstring": "Update command options.", "id": "f1669:c0:m1"} {"signature": "def __init__(self, **settings):", "body": "super(FileSession, self).__init__(**settings)self.host = settings.get(\"\", self.DEFAULT_SESSION_POSITION)self._prefix = settings.get(\"\", '')if not exists(self.host):os.makedirs(self.host, ) if not isdir(self.host):raise SessionConfigurationError('')", "docstring": "Initialize the File Session Driver.\nThe settings option 'host' is recommended; the option 'prefix' is optional.\nIf 'prefix' is not given, 'default' is used as the prefix.\nhost: directory path where session object files are saved.\nprefix: prefix of the session file name; session files are named like: prefix@session_id", "id": "f1676:c0:m0"} {"signature": "def get(self, session_id):", "body": "if session_id not in self._data_handler:return {}session_obj = self._data_handler[session_id]now = datetime.utcnow()expires = session_obj.get('', now)if expires > now:return session_objreturn {}", "docstring": "Get the session object from the host.", "id": "f1677:c0:m1"} {"signature": "def __init_session_driver(self):", "body": "driver = self.settings.get(\"\")if not driver:raise SessionConfigurationError('')driver_settings = self.settings.get(\"\", {})if not driver_settings:raise SessionConfigurationError('')cache_driver = self.settings.get(\"\", True)if cache_driver:cache_name = ''cache_handler = self.handler.applicationif not hasattr(cache_handler, cache_name):setattr(cache_handler,cache_name,SessionDriverFactory.create_driver(driver, **driver_settings))session_driver = getattr(cache_handler, cache_name)else:session_driver = SessionDriverFactory.create_driver(driver, **driver_settings)self.driver = session_driver(**driver_settings)", "docstring": "Set up the session driver.", "id": "f1679:c0:m2"} {"signature": "def _get_session_object_from_driver(self, session_id):", "body": "return self.driver.get(session_id)", "docstring": "Get session data from driver.", "id": "f1679:c0:m5"} {"signature": "def run(self):", "body": "return self.cdb.db.query(\"\", self.query)", "docstring": "Runs the dataset query, and returns the result", "id": "f1689:c0:m2"} {"signature": "def ping(self):", "body": "return self.db.ping()", "docstring": "Pings the ConnectorDB server. Useful for checking if the connection is valid", "id": "f1691:c0:m10"} {"signature": "def count_users(self):", "body": "return int(self.db.get(\"\", {\"\": \"\"}).text)", "docstring": "Gets the total number of users registered with the database. 
Only available to administrator.", "id": "f1691:c0:m4"} {"signature": "def setauth(self,basic_auth):", "body": "self.headers = []if basic_auth is not None:class auth_extractor():def __init__(self):self.headers = {}extractor = auth_extractor()basic_auth(extractor)for header in extractor.headers:self.headers.append(\"\" % (header, extractor.headers[header]))", "docstring": "setauth can be used during runtime to make sure that authentication is reset.\n it can be used when changing passwords/apikeys to make sure reconnects succeed", "id": "f1692:c0:m1"} {"signature": "def __on_message(self, ws, msg):", "body": "msg = json.loads(msg)logging.debug(\"\", msg[\"\"])stream_key = msg[\"\"] + \"\"if \"\" in msg:stream_key += msg[\"\"]self.subscription_lock.acquire()if stream_key in self.subscriptions:subscription_function = self.subscriptions[stream_key]self.subscription_lock.release()fresult = subscription_function(msg[\"\"], msg[\"\"])if fresult is True:fresult = msg[\"\"]if fresult is not False and fresult is not None and msg[\"\"].endswith(\"\") and msg[\"\"].count(\"\") == :self.insert(msg[\"\"][:-], fresult)else:self.subscription_lock.release()logging.warn(\"\",msg[\"\"], list(self.subscriptions.keys()))", "docstring": "This function is called whenever there is a message received from the server", "id": "f1692:c0:m16"} {"signature": "def unsubscribe(self, stream, transform=\"\"):", "body": "if self.status is not \"\":return Falselogging.debug(\"\", stream)self.send({\"\": \"\",\"\": stream,\"\": transform})self.subscription_lock.acquire()del self.subscriptions[stream + \"\" + transform]if len(self.subscriptions) is :self.subscription_lock.release()self.disconnect()else:self.subscription_lock.release()", "docstring": "Unsubscribe from the given stream (with the optional transform)", "id": "f1692:c0:m7"} {"signature": "def __reconnect_fnc(self):", "body": "if self.connect():self.__resubscribe()else:self.__reconnect()", "docstring": "This function is called by reconnect after the time delay", "id": "f1692:c0:m11"} {"signature": "def __del__(self):", "body": "self.disconnect()", "docstring": "Make sure that all threads shut down when needed", "id": "f1692:c0:m19"} {"signature": "def connect(self):", "body": "self.ws_openlock.acquire()self.ws_openlock.release()if self.status == \"\":return True if self.status == \"\":time.sleep()return self.connect()if self.status == \"\" or self.status == \"\":self.ws = websocket.WebSocketApp(self.ws_url,header=self.headers,on_message=self.__on_message,on_ping=self.__on_ping,on_open=self.__on_open,on_close=self.__on_close,on_error=self.__on_error)self.ws_thread = threading.Thread(target=self.ws.run_forever)self.ws_thread.daemon = Trueself.status = \"\"self.ws_openlock.acquire()self.ws_thread.start()self.ws_openlock.acquire()self.ws_openlock.release()return self.status == \"\"", "docstring": "Attempt to connect to the websocket - and returns either True or False depending on if\n the connection was successful or not", "id": "f1692:c0:m8"} {"signature": "def subscribe(self, stream, callback, transform=\"\"):", "body": "if self.status == \"\" or self.status == \"\" or self.status == \"\":self.connect()if self.status is not \"\":return Falselogging.debug(\"\", stream)self.send({\"\": \"\", \"\": stream, \"\": transform})with self.subscription_lock:self.subscriptions[stream + \"\" + transform] = callbackreturn True", "docstring": "Given a stream, a callback and an optional transform, sets up the subscription", "id": "f1692:c0:m6"} {"signature": "def delete(self):", 
"body": "self.db.delete(self.path)", "docstring": "Deletes the user/device/stream", "id": "f1693:c0:m3"} {"signature": "@propertydef name(self):", "body": "return self.data[\"\"]", "docstring": "Returns the object's name. Object names are immutable (unless logged in is a database admin)", "id": "f1693:c0:m6"} {"signature": "@propertydef description(self):", "body": "if \"\" in self.data:return self.data[\"\"]return None", "docstring": "Allows to directly set the object's description. Use as a property", "id": "f1693:c0:m9"} {"signature": "def set(self, property_dict):", "body": "self.metadata = self.db.update(self.path, property_dict).json()", "docstring": "Attempts to set the given properties of the object.\n An example of this is setting the nickname of the object::\n\n cdb.set({\"nickname\": \"My new nickname\"})\n\n note that there is a convenience property `cdb.nickname` that allows you to get/set the nickname directly.", "id": "f1693:c0:m5"} {"signature": "def refresh(self):", "body": "self.metadata = self.db.read(self.path).json()", "docstring": "Refresh reloads data from the server. It raises an error if it fails to get the object's metadata", "id": "f1693:c0:m1"} {"signature": "@propertydef public(self):", "body": "if \"\" in self.data:return self.data[\"\"]return None", "docstring": "gets whether the device is public\n (this means different things based on connectordb permissions setup - connectordb.com\n has this be whether the device is publically visible. Devices are individually public/private.)", "id": "f1694:c0:m8"} {"signature": "def streams(self):", "body": "result = self.db.read(self.path, {\"\": \"\"})if result is None or result.json() is None:return []streams = []for s in result.json():strm = self[s[\"\"]]strm.metadata = sstreams.append(strm)return streams", "docstring": "Returns the list of streams that belong to the device", "id": "f1694:c0:m1"} {"signature": "def import_stream(self, directory):", "body": "with open(os.path.join(directory, \"\"), \"\") as f:sdata = json.load(f)s = self[sdata[\"\"]]if s.exists():raise ValueError(\"\" + s.name + \"\")s.create()ddb = DatabaseConnection(self.apikey, url=self.db.baseurl)d = Device(ddb, self.path)sown = d[s.name]sown.insert_array(DatapointArray().loadExport(directory))if (sdata[\"\"] and self.db.path != self.path):s.downlink = Truewith open(os.path.join(directory, \"\"), \"\") as f:s.insert_array(json.load(f))del sdata[\"\"]s.set(sdata)", "docstring": "Imports a stream from the given directory. You export the Stream\n by using stream.export()", "id": "f1694:c0:m5"} {"signature": "@propertydef role(self):", "body": "if \"\" in self.data:return self.data[\"\"]return None", "docstring": "Gets the role of the device. This is the permissions level that the device has. It might\n not be accessible depending on the permissions setup of ConnectorDB. 
Returns None if not accessible", "id": "f1694:c0:m10"} {"signature": "def close(self):", "body": "self.r.close()", "docstring": "Closes the active connections to ConnectorDB", "id": "f1695:c2:m2"} {"signature": "def unsubscribe(self, stream, transform=\"\"):", "body": "return self.ws.unsubscribe(stream, transform)", "docstring": "Unsubscribe from the given stream", "id": "f1695:c2:m12"} {"signature": "def get(self, path, params=None):", "body": "return self.handleresult(self.r.get(urljoin(self.url, path),params=params))", "docstring": "Sends a GET request to the given path in the database, with optional URL parameters", "id": "f1695:c2:m10"} {"signature": "def read(self, path, params=None):", "body": "return self.handleresult(self.r.get(urljoin(self.url + CRUD_PATH,path),params=params))", "docstring": "Read the result at the given path (GET) from the CRUD API, using the optional params dictionary\n as url parameters.", "id": "f1695:c2:m7"} {"signature": "def setauth(self, user_or_apikey=None, user_password=None):", "body": "auth = Noneif user_or_apikey is not None:if user_password is None:user_password = user_or_apikeyuser_or_apikey = \"\"auth = HTTPBasicAuth(user_or_apikey, user_password)self.r.auth = authself.ws.setauth(auth)", "docstring": "setauth sets the authentication header for use in the session.\n It is for use when the apikey is updated or something of the sort, such that\n there is a seamless experience.", "id": "f1695:c2:m1"} {"signature": "@propertydef sschema(self):", "body": "if \"\" in self.data:return self.data[\"\"]return None", "docstring": "Returns the JSON schema of the stream as a string", "id": "f1697:c0:m19"} {"signature": "def __getitem__(self, getrange):", "body": "if not isinstance(getrange, slice):return self(i1=getrange, i2=getrange + )[]return self(i1=getrange.start, i2=getrange.stop)", "docstring": "Allows accessing the stream as if it were just one big python array.\n An example::\n\n #Returns the most recent 5 datapoints from the stream\n stream[-5:]\n\n #Returns all the data the stream holds.\n stream[:]\n\n In order to perform transforms on the stream and to aggregate data, look at __call__,\n which allows getting index ranges along with a transform.", "id": "f1697:c0:m7"} {"signature": "@propertydef schema(self):", "body": "if \"\" in self.data:return json.loads(self.data[\"\"])return None", "docstring": "Returns the JSON schema of the stream as a python dict.", "id": "f1697:c0:m18"} {"signature": "@schema.setterdef schema(self, schema):", "body": "if isinstance(schema, basestring):strschema = schemaschema = json.loads(schema)else:strschema = json.dumps(schema)Draft4Validator.check_schema(schema)self.set({\"\": strschema})", "docstring": "Sets the stream's schema. An empty schema is \"{}\". The schemas allow you to set a specific data type. \n Both python dicts and strings are accepted.", "id": "f1697:c0:m20"} {"signature": "def append(self, data):", "body": "self.insert(data)", "docstring": "Same as insert, using the pythonic array name", "id": "f1697:c0:m3"} {"signature": "def import_device(self, directory):", "body": "with open(os.path.join(directory, \"\"), \"\") as f:ddata = json.load(f)d = self[ddata[\"\"]]dname = ddata[\"\"]del ddata[\"\"]if dname == \"\":returnelif dname == \"\":d.set(ddata)elif d.exists():raise ValueError(\"\" + d.name + \"\")else:d.create(**ddata)for name in os.listdir(directory):sdir = os.path.join(directory, name)if os.path.isdir(sdir):d.import_stream(sdir)", "docstring": "Imports a device from the given directory. 
You export the device\n by using device.export()\n\n There are two special cases: user and meta devices.\n If the device name is meta, import_device will not do anything.\n If the device name is \"user\", import_device will overwrite the user device\n even if it exists already.", "id": "f1698:c0:m7"} {"signature": "def streams(self, public=False, downlink=False, visible=True):", "body": "result = self.db.read(self.path, {\"\": \"\",\"\": str(public).lower(),\"\": str(downlink).lower(),\"\": str(visible).lower()})if result is None or result.json() is None:return []streams = []for d in result.json():s = self[d[\"\"]][d[\"\"]]s.metadata = dstreams.append(s)return streams", "docstring": "Returns the list of streams that belong to the user.\n The list can optionally be filtered in 3 ways:\n - public: when True, returns only streams belonging to public devices\n - downlink: If True, returns only downlink streams\n - visible: If True (default), returns only streams of visible devices", "id": "f1698:c0:m3"} {"signature": "@role.setterdef role(self, new_role):", "body": "self.set({\"\": new_role})", "docstring": "Attempts to set the user's role", "id": "f1698:c0:m13"} {"signature": "def export(self, directory):", "body": "exportInfoFile = os.path.join(directory, \"\")if os.path.exists(directory):if not os.path.exists(exportInfoFile):raise FileExistsError(\"\")with open(exportInfoFile) as f:exportInfo = json.load(f)if exportInfo[\"\"] != :raise ValueError(\"\")else:os.mkdir(directory)with open(exportInfoFile, \"\") as f:json.dump({\"\": , \"\": self.db.get(\"\").text}, f)udir = os.path.join(directory, self.name)os.mkdir(udir)with open(os.path.join(udir, \"\"), \"\") as f:json.dump(self.data, f)for d in self.devices():d.export(os.path.join(udir, d.name))", "docstring": "Exports the ConnectorDB user into the given directory.\n The resulting export can be imported by using the import command(cdb.import(directory)),\n\n Note that Python cannot export passwords, since the REST API does\n not expose password hashes. 
Therefore, the imported user will have\n password same as username.\n\n The user export function is different than device and stream exports because\n it outputs a format compatible directly with connectorDB's import functionality:\n\n connectordb import < mydatabase > \n\n This also means that you can export multiple users into the same directory without issue", "id": "f1698:c0:m6"} {"signature": "def __repr__(self):", "body": "return \"\" % (self.path, )", "docstring": "Returns a string representation of the user", "id": "f1698:c0:m5"} {"signature": "def sum(self):", "body": "raw = self.raw()s = for i in range(len(raw)):s += raw[i][\"\"]return s", "docstring": "Gets the sum of the data portions of all datapoints within", "id": "f1699:c0:m13"} {"signature": "def t(self):", "body": "return list(map(lambda x: datetime.datetime.fromtimestamp(x[\"\"]), self.raw()))", "docstring": "Returns just the timestamp portion of the datapoints as a list.\n The timestamps are in python datetime's date format.", "id": "f1699:c0:m6"} {"signature": "def mean(self):", "body": "return self.sum() / float(len(self))", "docstring": "Gets the mean of the data portions of all datapoints within", "id": "f1699:c0:m14"} {"signature": "def stop(self):", "body": "with self.synclock:if self.syncthread is not None:self.syncthread.cancel()self.syncthread = None", "docstring": "Stops the background synchronization thread", "id": "f1700:c0:m13"} {"signature": "def __init__(self, database_file_path, on_create=None, apikey=None, onsync=None, onsyncfail=None, syncraise=False):", "body": "self.database = apsw.Connection(database_file_path)c = self.database.cursor()c.execute(\"\")c.execute(\"\")c.execute(\"\")c.execute(\"\")row_number = next(c)[]if row_number == :logging.debug(\"\")c.execute(\"\",(CONNECTORDB_URL, ))c.execute(\"\")self.__apikey, self.__serverurl, self.__lastsync, self.__syncperiod = next(c)c.execute(\"\")self.streams = {}for row in c.fetchall():self.streams[row[]] = json.loads(row[])if apikey is not None:self.apikey = apikeyself.synclock = threading.Lock()self.syncthread = Noneself.__cdb = Noneself.onsync = onsyncself.onsyncfail = onsyncfailself.syncraise = syncraiseif on_create is not None and row_number == :try:if False == on_create(self):raise Exception(\"\")except:self.database.close()os.remove(database_file_path)raise", "docstring": "Logger is started by passing its database file, and an optional callback which is run if the database\n is not initialized, allowing setup code to be only run once.\n\n The on_create callback can optionally be used to initialize the necessary api keys and such.\n If on_create returns False or raises an error, the uninitialized database file will be removed.", "id": "f1700:c0:m0"} {"signature": "def start(self):", "body": "with self.synclock:if self.syncthread is not None:logging.warn(\"\")returnself.sync() self.__setsync()", "docstring": "Start the logger background synchronization service. This allows you to not need to\n worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger\n will by synced every syncperiod.", "id": "f1700:c0:m12"} {"signature": "def insert(self, streamname, value):", "body": "if streamname not in self.streams:raise Exception(\"\" % (streamname, ))validate(value, self.streams[streamname])value = json.dumps(value)logging.debug(\"\" % (streamname, value))c = self.database.cursor()c.execute(\"\",(streamname, time.time(), value))", "docstring": "Insert the datapoint into the logger for the given stream name. 
The logger caches the datapoint\n and eventually synchronizes it with ConnectorDB", "id": "f1700:c0:m7"} {"signature": "def close(self):", "body": "self.stop()with self.synclock:self.database.close()", "docstring": "Closes the database connections and stops all synchronization.", "id": "f1700:c0:m4"} {"signature": "@propertydef connectordb(self):", "body": "if self.__cdb is None:logging.debug(\"\" + self.serverurl)self.__cdb = ConnectorDB(self.apikey, url=self.serverurl)return self.__cdb", "docstring": "Returns the ConnectorDB object that the logger uses. Raises an error if Logger isn't able to connect", "id": "f1700:c0:m1"} {"signature": "@propertydef data(self):", "body": "c = self.database.cursor()c.execute(\"\")return json.loads(next(c)[])", "docstring": "The data property allows the user to save settings/data in the database, so that\n there does not need to be extra code messing around with settings.\n\n Use this property to save things that can be converted to JSON inside the logger database,\n so that you don't have to mess with configuration files or saving setting otherwise::\n\n from connectordb.logger import Logger\n\n l = Logger(\"log.db\")\n\n l.data = {\"hi\": 56}\n\n # prints the data dictionary\n print l.data", "id": "f1700:c0:m24"} {"signature": "@register.filterdef timestamp(value):", "body": "return timedelta(, int(value))", "docstring": "Timestamp to elapsed time format.", "id": "f1705:m1"} {"signature": "@register.filterdef human_bytes(value):", "body": "value = float(value)if value >= :gigabytes = value / size = '' % gigabyteselif value >= :megabytes = value / size = '' % megabyteselif value >= :kilobytes = value / size = '' % kilobyteselse:size = '' % valuereturn size", "docstring": "Convert a byte value into a human-readable format.", "id": "f1705:m0"} {"signature": "def _context_data(data, request=None):", "body": "try:return dict(site.each_context(request).items() + data.items())except AttributeError:return data", "docstring": "Add admin global context, for compatibility with Django 1.7", "id": "f1706:m3"} {"signature": "def flush(request):", "body": "mc_client.flush_all()messages.success(request, _(''))return redirect('')", "docstring": "Flush servers.", "id": "f1706:m8"} {"signature": "def parse(src, encoding=None):", "body": "def safe_is_file(filename):try:return os.path.isfile(src)except ValueError: return Falseif hasattr(src, ''): data = src.read()elif safe_is_file(src):with open(src, '') as fh:data = fh.read()else: data = srcif hasattr(data, ''): exception = Noneencodings = [encoding, '', '', '', '']for encoding in encodings: if not encoding:continuetry:data = data.decode(encoding)breakexcept UnicodeDecodeError as e:exception = eexcept UnicodeEncodeError:breakelse:raise exception transactions = mt940.models.Transactions()transactions.parse(data)return transactions", "docstring": "Parses mt940 data and returns transactions object\n\n:param src: file handler to read, filename to read or raw data as string\n:return: Collection of transactions\n:rtype: Transactions", "id": "f1712:m0"} {"signature": "def mBank_set_tnr(transactions, tag, tag_dict, *args):", "body": "matches = tnr_re.search(tag_dict[tag.slug])if matches: tag_dict[''] = matches.groupdict()['']return tag_dict", "docstring": "mBank Collect states TNR in transaction details as unique id for\ntransactions, that may be used to identify the same transactions in\ndifferent statement files eg. 
partial mt942 and full mt940\nInformation about tnr uniqueness has been obtained from mBank support,\nit lacks in mt940 mBank specification.", "id": "f1716:m5"} {"signature": "def date_fixup_pre_processor(transactions, tag, tag_dict, *args):", "body": "if tag_dict[''] == '':year = int(tag_dict[''], )_, max_month_day = calendar.monthrange(year, )if int(tag_dict[''], ) > max_month_day:tag_dict[''] = str(max_month_day)return tag_dict", "docstring": "Replace illegal February 29, 30 dates with the last day of February.\n\nGerman banks use a variant of the 30/360 interest rate calculation,\nwhere each month has always 30 days even February. Python's datetime\nmodule won't accept such dates.", "id": "f1716:m1"} {"signature": "def coalesce(*args):", "body": "for arg in args:if arg is not None:return arg", "docstring": "Return the first non-None argument\n\n>>> coalesce()\n\n>>> coalesce(0, 1)\n0\n>>> coalesce(None, 0)\n0", "id": "f1718:m0"} {"signature": "def frombytes(data, size, bandtype=gdal.GDT_Byte):", "body": "r = ImageDriver('').raster('', size, bandtype)r.frombytes(data)return r", "docstring": "Returns an in-memory raster initialized from a pixel buffer.\n\n Arguments:\n data -- byte buffer of raw pixel data\n size -- two or three-tuple of (xsize, ysize, bandcount)\n bandtype -- band data type", "id": "f1741:m7"} {"signature": "def driver_for_path(path, drivers=None):", "body": "ext = (os.path.splitext(path)[][:] or path).lower()drivers = drivers or ImageDriver.registry if ext else {}for name, meta in drivers.items():if ext == meta.get('', '').lower():return ImageDriver(name)return None", "docstring": "Returns the gdal.Driver for a path or None based on the file extension.\n\n Arguments:\n path -- file path as str with a GDAL supported file extension", "id": "f1741:m1"} {"signature": "def masked_array(self, geometry=None):", "body": "if geometry is None:return self._masked_array()geom = transform(geometry, self.sref)env = Envelope.from_geom(geom).intersect(self.envelope)arr = self._masked_array(env)if geom.GetGeometryType() != ogr.wkbPoint:dims = self.get_offset(env)[:]affine = AffineTransform(*tuple(self.affine))affine.origin = env.ulmask = ~np.ma.make_mask(geom_to_array(geom, dims, affine))arr.mask = arr.mask | maskreturn arr", "docstring": "Returns a MaskedArray using nodata values.\n\n Keyword args:\n geometry -- any geometry, envelope, or coordinate extent tuple", "id": "f1741:c2:m22"} {"signature": "def rasterize(layer, rast):", "body": "driver = ImageDriver('')r2 = driver.raster(driver.ShortName, rast.size)r2.affine = rast.affinesref = rast.srefif not sref.srid:sref = SpatialReference()r2.sref = srefml = MemoryLayer(sref, layer.GetGeomType())ml.load(layer)status = gdal.RasterizeLayer(r2.ds, (,), ml.layer, options=['' % ml.id])ml.close()return r2", "docstring": "Returns a Raster from layer features.\n\n Arguments:\n layer -- Layer to rasterize\n rast -- Raster with target affine, size, and sref", "id": "f1741:m4"} {"signature": "@propertydef info(self):", "body": "return self._driver.GetMetadata()", "docstring": "Returns a dict of gdal.Driver metadata.", "id": "f1741:c1:m11"} {"signature": "def raster(self, path, size, bandtype=gdal.GDT_Byte):", "body": "path = getattr(path, '', path)try:is_multiband = len(size) > nx, ny, nbands = size if is_multiband else size + (,)except (TypeError, ValueError) as exc:exc.args = ('',)raiseif nx < or ny < :raise ValueError('' % (size,))if not self._is_empty(path):raise IOError('' % path)ds = self.Create(path, nx, ny, nbands, bandtype)if not ds:raise 
ValueError('' % (path, str(self)))return Raster(ds)", "docstring": "Returns a new Raster instance.\n\n gdal.Driver.Create() does not support all formats.\n\n Arguments:\n path -- file object or path as str\n size -- two or three-tuple of (xsize, ysize, bandcount)\n bandtype -- GDAL pixel data type", "id": "f1741:c1:m10"} {"signature": "def project(self, coords):", "body": "geotransform = self.tuplefor x, y in coords:geo_x = geotransform[] + geotransform[] * x + geotransform[] * ygeo_y = geotransform[] + geotransform[] * x + geotransform[] * ygeo_x += geotransform[] / geo_y += geotransform[] / yield geo_x, geo_y", "docstring": "Convert image pixel/line coordinates to georeferenced x/y, return a\n generator of two-tuples.\n\n Arguments:\n coords -- input coordinates as iterable containing two-tuples/lists\n such as ((0, 0), (10, 10))", "id": "f1741:c0:m5"} {"signature": "@propertydef polygon(self):", "body": "ring = ogr.Geometry(ogr.wkbLinearRing)for coord in self.ll, self.lr, self.ur, self.ul, self.ll:ring.AddPoint_2D(*coord)polyg = ogr.Geometry(ogr.wkbPolygon)polyg.AddGeometryDirectly(ring)return polyg", "docstring": "Returns an OGR Geometry for this envelope.", "id": "f1742:c0:m21"} {"signature": "@propertydef centroid(self):", "body": "return self.min_x + self.width * , self.min_y + self.height * ", "docstring": "Returns the envelope centroid as a (x, y) tuple.", "id": "f1742:c0:m9"} {"signature": "def Geometry(*args, **kwargs):", "body": "arg = kwargs.pop('', None) or len(args) and args[]try:srs = kwargs.pop('', None) or arg.srs.wktexcept AttributeError:srs = SpatialReference()if hasattr(arg, ''):geom = ogr.CreateGeometryFromJson(json.dumps(arg))elif hasattr(arg, ''):char = arg[] if arg else ''i = char if isinstance(char, int) else ord(char)if i in (, ):geom = ogr.CreateGeometryFromWkb(arg)elif arg.startswith(''):geom = ogr.CreateGeometryFromJson(arg)elif arg.startswith(''):geom = ogr.CreateGeometryFromGML(arg)else:raise ValueError('' % arg)elif hasattr(arg, ''):geom = ogr.CreateGeometryFromWkb(bytes(arg.wkb))else:geom = ogr.Geometry(*args, **kwargs)if geom:if not isinstance(srs, SpatialReference):srs = SpatialReference(srs)geom.AssignSpatialReference(srs)return geom", "docstring": "Returns an ogr.Geometry instance optionally created from a geojson str\n or dict. 
The spatial reference may also be provided.", "id": "f1742:m2"} {"signature": "@propertydef lr(self):", "body": "return self.max_x, self.min_y", "docstring": "Returns the lower right coordinate.", "id": "f1742:c0:m18"} {"signature": "@propertydef ll(self):", "body": "return self.min_x, self.min_y", "docstring": "Returns the lower left coordinate.", "id": "f1742:c0:m16"} {"signature": "def __init__(self, *args):", "body": "if len(args) == :args = args[]try:extent = list(map(float, args))except (TypeError, ValueError) as exc:exc.args = ('' % repr(args),)raisetry:self.min_x, self.max_x = sorted(extent[::])self.min_y, self.max_y = sorted(extent[::])except ValueError as exc:exc.args = ('' % len(args),)raise", "docstring": "Creates an envelope from lower-left and upper-right coordinates.\n\n Arguments:\n args -- min_x, min_y, max_x, max_y or a four-tuple", "id": "f1742:c0:m0"} {"signature": "@propertydef srid(self):", "body": "epsg_id = (self.GetAuthorityCode('') orself.GetAuthorityCode(''))try:return int(epsg_id)except TypeError:return", "docstring": "Returns the EPSG ID as int if it exists.", "id": "f1745:c0:m5"} {"signature": "@abc.abstractmethoddef move_image(self, dx, dy):", "body": "return", "docstring": "Move the image the annotation is according to by some pixels.\n\n:param dx: The amount of pixels the origin of the image is moved to the right.\n:param dy: The amount of pixels the origin of the image is moved to the bottom.\n:return:", "id": "f1764:c1:m5"} {"signature": "def crop_image(img, start_y, start_x, h, w):", "body": "return img[start_y:start_y + h, start_x:start_x + w, :].copy()", "docstring": "Crop an image given the top left corner.\n:param img: The image\n:param start_y: The top left corner y coord\n:param start_x: The top left corner x coord\n:param h: The result height\n:param w: The result width\n:return: The cropped image.", "id": "f1764:m8"} {"signature": "@abc.abstractmethoddef iou(self, other):", "body": "return ", "docstring": "Calculates the iou between two detections.\n:param other: The other detection that is used to compute the iou.\n:return: The intersection over union value.", "id": "f1764:c1:m2"} {"signature": "def augment_detections(hyper_params, feature, label):", "body": "if hyper_params.problem.get(\"\", None) is None:return feature, labelimg_h, img_w, img_c = feature[\"\"].shapeaugmented_feature = {}augmented_label = {}augmented_feature[\"\"] = feature[\"\"].copy()if \"\" in feature:augmented_feature[\"\"] = feature[\"\"].copy()if \"\" in feature:augmented_feature[\"\"] = feature[\"\"]augmented_feature[\"\"] = np.array([], dtype=np.uint8)augmented_feature[\"\"] = np.array([, ], dtype=np.int8)for k in label.keys():augmented_label[k] = [detection.copy() for detection in label[k]]if hyper_params.problem.augmentation.get(\"\", False):if random.random() < :img_h, img_w, img_c = augmented_feature[\"\"].shapeaugmented_feature[\"\"] = np.fliplr(augmented_feature[\"\"])if \"\" in feature:augmented_feature[\"\"] = np.fliplr(augmented_feature[\"\"])augmented_feature[\"\"][] = hflip_detections(augmented_label, img_w)if hyper_params.problem.augmentation.get(\"\", False):img_h, img_w, img_c = augmented_feature[\"\"].shapedx = int(random.random() * )dy = int(random.random() * )augmented_feature[\"\"] = crop_image(augmented_feature[\"\"], dy, dx, img_h - dy, img_w - dx)if \"\" in feature:augmented_feature[\"\"] = crop_image(augmented_feature[\"\"], dy, dx, img_h - dy, img_w - dx)augmented_feature[\"\"][] += dyaugmented_feature[\"\"][] += dxmove_detections(augmented_label, 
-dy, -dx)if hyper_params.problem.augmentation.get(\"\", None) is not None:img_h, img_w, img_c = augmented_feature[\"\"].shapetarget_w = hyper_params.problem.augmentation.random_crop.shape.widthtarget_h = hyper_params.problem.augmentation.random_crop.shape.heightdelta_x = max(int(math.ceil((target_w + - img_w) / )), )delta_y = max(int(math.ceil((target_h + - img_h) / )), )move_detections(augmented_label, delta_y, delta_x)augmented_feature[\"\"] = cv2.copyMakeBorder(augmented_feature[\"\"],delta_y, delta_y, delta_x, delta_x,cv2.BORDER_CONSTANT)img_h, img_w, img_c = augmented_feature[\"\"].shapestart_x = start_y = if len(augmented_label[\"\"]) != :idx = random.randint(, len(augmented_label[\"\"]) - )detection = augmented_label[\"\"][idx]start_x = int(detection.cx - random.random() * (target_w - ) / - )start_y = int(detection.cy - random.random() * (target_h - ) / - )else:start_x = int(img_w * random.random())start_y = int(img_h * random.random())if start_x < :start_x = if start_y < :start_y = if start_x >= img_w - target_w:start_x = img_w - target_w - if start_y >= img_h - target_h:start_y = img_h - target_h - augmented_feature[\"\"] = crop_image(augmented_feature[\"\"], start_y, start_x, target_h, target_w)if \"\" in feature:augmented_feature[\"\"] = crop_image(augmented_feature[\"\"], start_y, start_x, target_h, target_w)augmented_feature[\"\"][] += start_yaugmented_feature[\"\"][] += start_xmove_detections(augmented_label, -start_y, -start_x)if hyper_params.problem.augmentation.get(\"\", False):if random.random() < :augmented_feature[\"\"] = full_texture_augmentation(augmented_feature[\"\"])return augmented_feature, augmented_label", "docstring": "Augment the detection dataset.\n\nIn your hyper_parameters.problem.augmentation add configurations to enable features.\nSupports \"enable_horizontal_flip\", \"enable_micro_translation\", \"random_crop\" : {\"shape\": { \"width\", \"height\" }}\nand \"enable_texture_augmentation\". 
Make sure to also set the \"steps\" otherwise this method will not be used properly.\n\nRandom crop ensures at least one detection is in the crop region.\n\nSample configuration\n\"problem\": {\n \"augmentation\": {\n \"steps\": 40,\n \"enable_texture_augmentation\": true,\n \"enable_micro_translation\": true,\n \"enable_horizontal_flip\": true,\n \"random_crop\": {\n \"shape\": {\n \"width\": 256,\n \"height\": 256\n }\n }\n }\n}\n\n:param hyper_params: The hyper parameters object\n:param feature: A dict containing all features, must be in the style created by detection datasets.\n:param label: A label dict in the detection dataset style.\n:return: Modified feature and label dict (copied & modified).", "id": "f1764:m11"} {"signature": "def to_currency(self, val, currency='', cents=True, separator='',adjective=False):", "body": "left, right, is_negative = parse_currency_parts(val)try:cr1, cr2 = self.CURRENCY_FORMS[currency]except KeyError:raise NotImplementedError('' %(currency, self.__class__.__name__))if adjective and currency in self.CURRENCY_ADJECTIVES:cr1 = prefix_currency(self.CURRENCY_ADJECTIVES[currency],cr1)minus_str = \"\" % self.negword if is_negative else \"\"cents_str = self._cents_verbose(right, currency)if cents else self._cents_terse(right, currency)return u'' % (minus_str,self.to_cardinal(left, feminine=cr1[-]),self.pluralize(left, cr1),separator,cents_str,self.pluralize(right, cr2))", "docstring": "Args:\n val: Numeric value\n currency (str): Currency code\n cents (bool): Verbose cents\n separator (str): Cent separator\n adjective (bool): Prefix currency name with adjective\nReturns:\n str: Formatted string", "id": "f1770:c0:m6"} {"signature": "def split_by_3(self, number):", "body": "blocks = ()length = len(number)if length < :blocks += ((number,),)else:len_of_first_block = length % if len_of_first_block > :first_block = number[:len_of_first_block],blocks += first_block,for i in range(len_of_first_block, length, ):next_block = (number[i:i + ],),blocks += next_blockreturn blocks", "docstring": "starting here, it groups the number by three from the tail\n'1234567' -> (('1',),('234',),('567',))\n:param number:str\n:rtype:tuple", "id": "f1801:c0:m1"} {"signature": "async def parse_character_results(soup):", "body": "soup = list(soup.find_all('', class_='')[].children)[:]characters = []for item in soup:temp_c = {'': None, '': None, '': {}}temp_c[''] = item.abbr.get('')temp_c[''] = list(item.children)[].a.stringtemp_c[''] = []for game in list(list(list(item.children)[].children)[].children):if isinstance(game, NavigableString):continuetemp_c[''].append({'': game.string, '': game.get('').split('')[]})characters.append(temp_c)del temp_creturn characters", "docstring": "Parse a page of character results.\n\n:param soup: The BS4 class object\n:return: Returns a list of dictionaries containing a name, gender and list of dictionaries containing a game name/id pair\n for games they appeared in.", "id": "f1846:m3"} {"signature": "async def parse_release_results(soup):", "body": "soup = list(soup.find_all('', class_='')[].children)[:]releases = []for item in soup:child = list(item.children)temp_rel = {'': None, '': None, '': None, '': None}temp_rel[''] = child[].stringtemp_rel[''] = child[].stringtemp_rel[''] = child[].abbr.get('')temp_rel[''] = child[].a.stringreleases.append(temp_rel)del temp_relreturn releases", "docstring": "Parse Releases search pages.\n\n:param soup: The BS4 class object\n:return: A list of dictionaries containing a release dictionary. 
This is the same as the one returned in get_novel.\n It contains a Date released, Platform, Ages group and Name.", "id": "f1846:m1"} {"signature": "async def parse_prod_staff_results(soup):", "body": "soup = soup.find_all('')producers = []for item in soup:producers.append({'': item.abbr.get(''), '': item.a.string})return producers", "docstring": "Parse a page of producer or staff results\n\n:param soup: The BS4 class object\n:return: A list of dictionaries containing a name and nationality.", "id": "f1846:m2"} {"signature": "async def parse_vn_results(soup):", "body": "soup = soup.find_all('', class_='')vns = []for item in soup[:]:vns.append({'': item.string, '': item.a.get('')[:]})return vns", "docstring": "Parse Visual Novel search pages.\n\n:param soup: The BS4 class object\n:return: A list of dictionaries containing a name and id.", "id": "f1846:m0"} {"signature": "async def parse_search(self, stype, soup):", "body": "if stype == '':return await parse_vn_results(soup)elif stype == '':return await parse_release_results(soup)elif stype == '':return await parse_prod_staff_results(soup)elif stype == '':return await parse_prod_staff_results(soup)elif stype == '':return await parse_character_results(soup)elif stype == '':return await parse_tag_results(soup)elif stype == '':return await parse_tag_results(soup)elif stype == '':return await parse_user_results(soup)", "docstring": "This is our parsing dispatcher\n\n:param stype: Search type category\n:param soup: The beautifulsoup object that contains the parsed html", "id": "f1848:c0:m3"} {"signature": "def __setattr__(self, key, value):", "body": "if self.__isfrozen and not hasattr(self, key):raise TypeError(\"\" % self)object.__setattr__(self, key, value)", "docstring": "protect users from abusing properties", "id": "f1854:c3:m11"} {"signature": "def _modify_service_config(self):", "body": "self.project.get_service('').options[''] = [\"\"]bootstrap = self.project.get_service('')bootstrap.options[''] = [p.replace(\"\", \"\")for p in bootstrap.options['']]for service in self.project.services:container_name = service.options.get('')if container_name:service.options[''] = container_name.replace(\"\",PROJECT_NAME)", "docstring": "Modify the services configurations to allow testing.", "id": "f1859:c0:m1"} {"signature": "def initialize(ctx, a='', c='', d=''):", "body": "", "docstring": "Constructor (can a constructor return anything?)", "id": "f1862:c1:m0"} {"signature": "def on_receive_transactions(self, proto, transactions):", "body": "log.debug('')log.debug('', count=len(transactions), remote_id=proto)def _add_txs():for tx in transactions:self.add_transaction(tx, origin=proto)gevent.spawn(_add_txs)", "docstring": "receives rlp.decoded serialized", "id": "f1868:c2:m18"} {"signature": "def transfer(ctx, _to='', _value='', returns=STATUS):", "body": "log.DEV('')if ctx.accounts[ctx.msg_sender] >= _value:ctx.accounts[ctx.msg_sender] -= _valuectx.accounts[_to] += _valuectx.Transfer(ctx.msg_sender, _to, _value)return OKelse:return INSUFFICIENTFUNDS", "docstring": "Standardized Contract API:\n function transfer(address _to, uint256 _value) returns (bool _success)", "id": "f1870:c2:m1"} {"signature": "def approve(ctx, _spender='', _value='', returns=STATUS):", "body": "ctx.allowances[ctx.msg_sender][_spender] += _valuectx.Approval(ctx.msg_sender, _spender, _value)return OK", "docstring": "Standardized Contract API:\n function approve(address _spender, uint256 _value) returns (bool success)", "id": "f1870:c2:m5"} {"signature": "@propertydef 
last_valid_lockset(self):", "body": "for r in self.rounds:ls = self.rounds[r].locksetif ls.is_valid:return lsreturn None", "docstring": "highest valid lockset on height", "id": "f1875:c9:m4"} {"signature": "def check(self):", "body": "if not self.is_valid:return Truetest = (self.has_quorum, self.has_quorum_possible, self.has_noquorum)assert == len([x for x in test if x is not None])return True", "docstring": "either invalid or one of quorum, noquorum, quorumpossible", "id": "f1876:c9:m14"} {"signature": "def sign(self, privkey):", "body": "if self.v:raise InvalidSignature(\"\")if privkey in (, '', '' * ):raise InvalidSignature(\"\")rawhash = sha3(rlp.encode(self, self.__class__.exclude(['', '', ''])))if len(privkey) == :privkey = encode_privkey(privkey, '')pk = PrivateKey(privkey, raw=True)signature = pk.ecdsa_recoverable_serialize(pk.ecdsa_sign_recoverable(rawhash, raw=True))signature = signature[] + chr(signature[])self.v = ord(signature[]) + self.r = big_endian_to_int(signature[:])self.s = big_endian_to_int(signature[:])self._sender = Nonereturn self", "docstring": "Sign this with a private key", "id": "f1876:c3:m1"} {"signature": "def mk_privkeys(num):", "body": "privkeys = []assert num <= num_colorsfor i in range(num):j = while True:k = sha3(str(j))a = privtoaddr(k)an = big_endian_to_int(a)if an % num_colors == i:breakj += privkeys.append(k)return privkeys", "docstring": "make privkeys that support coloring, see utils.cstr", "id": "f1877:m0"} {"signature": "def delay(self, sender, receiver, packet, add_delay=):", "body": "bw = min(sender.ul_bandwidth, receiver.dl_bandwidth)delay = sender.base_latency + receiver.base_latencydelay += len(packet) / bwdelay += add_delayreturn delay", "docstring": "bandwidths are inaccurate, as we don't account for parallel transfers here", "id": "f1877:c0:m1"} {"signature": "def on_proposal(self, proposal, proto):", "body": "assert isinstance(proto, HDCProtocol)assert isinstance(proposal, Proposal)if proposal.height >= self.cm.height:assert proposal.lockset.is_validself.last_active_protocol = proto", "docstring": "called to inform about synced peers", "id": "f1879:c0:m5"} {"signature": "def iter_attribute(iterable_name) -> Union[Iterable, Callable]:", "body": "def create_new_class(decorated_class) -> Union[Iterable, Callable]:\"\"\"\"\"\"assert inspect.isclass(decorated_class), ''assert isinstance(iterable_name, str), ''decorated_class.iterator_attr_index = def __iter__(instance) -> Iterable:\"\"\"\"\"\"return instancedef __next__(instance) -> Any:\"\"\"\"\"\"assert hasattr(instance, iterable_name),''.format(iterable_name)assert isinstance(getattr(instance, iterable_name), collections.Iterable),''.format(iterable_name, instance.__class__.__name__)ind = instance.iterator_attr_indexwhile ind < len(getattr(instance, iterable_name)):val = getattr(instance, iterable_name)[ind]instance.iterator_attr_index += return valinstance.iterator_attr_index = raise StopIterationdct = dict(decorated_class.__dict__)dct[''] = __iter__dct[''] = __next__dct[''] = decorated_class.iterator_attr_indexreturn type(decorated_class.__name__, (collections.Iterable,), dct)return create_new_class", "docstring": "Decorator implementing Iterator interface with nicer manner.\n\n Example\n -------\n\n @iter_attribute('my_attr'):\n class DecoratedClass:\n ...\n\n Warning:\n ========\n\n When using PyCharm or MYPY you'll probably see issues with decorated class not being recognized as Iterator.\n That's an issue which I could not overcome yet, it's probably due to the fact that interpretation 
of object\n is being done statically rather than dynamically. MYPY checks for definition of methods in class code which\n changes at runtime. Since __iter__ and __next__ are added dynamically MYPY cannot find those\n defined in objects before object of a class is created. Possible workarounds for this issue are:\n\n 1. Define ``dummy`` __iter__ class like:\n\n @iter_attribute('attr')\n class Test:\n def __init__(self) -> None:\n self.attr = [1, 2, 3]\n\n def __iter__(self):\n pass\n\n 2. After creating object use cast or assert function denoting that particular instance inherits\n from collections.Iterator:\n\n assert isinstance(my_object, collections.Iterator)\n\n\n :param iterable_name: string representing attribute name which has to be iterated\n :return: DecoratedClass with implemented '__iter__' and '__next__' methods.", "id": "f1885:m0"} {"signature": "def __init__(self, buffer_or_path):", "body": "self._path = Noneself._path, buffer = to_buffer(buffer_or_path)with buffer as f:self._raw_environments, self._raw_variables_info, self._dfs = parse_eso(f)self._start_year = None", "docstring": "Parameters\n----------\nbuffer_or_path\n\nInitially, standard_output will have tuple instants (using 'year', 'month', 'day', 'hour', 'minute' columns,\n depending on given frequency). It is possible to change to datetime mode later.", "id": "f1902:c0:m0"} {"signature": "def get_data(self, environment_title_or_num=-, frequency=None):", "body": "if isinstance(environment_title_or_num, int):environment_title = tuple(self._raw_environments.keys())[environment_title_or_num]else:environment_title = environment_title_or_numif environment_title not in self._dfs:raise ValueError(f\"\")environment_dfs = self._dfs[environment_title]if frequency is None:for frequency in FREQUENCIES:if environment_dfs[frequency] is not None:breakif frequency not in FREQUENCIES:raise ValueError(f\"\")return self._dfs[environment_title][frequency]", "docstring": "Parameters\n----------\nenvironment_title_or_num\nfrequency: 'str', default None\n 'timestep', 'hourly', 'daily', 'monthly', 'annual', 'run_period'\n If None, will look for the smallest frequency of environment.", "id": "f1902:c0:m3"} {"signature": "@propertydef dir_path(self):", "body": "return self._dir_path", "docstring": "Returns\n-------\nsimulation directory path", "id": "f1904:c1:m7"} {"signature": "@propertydef _file_refs(self):", "body": "if self._prepared_file_refs is None:self._prepared_file_refs = {FILE_REFS.idf: FileInfo(constructor=lambda path: self._epm_cls.from_idf(path, idd_or_buffer_or_path=self._idd),get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.idf)),FILE_REFS.epw: FileInfo(constructor=lambda path: self._weather_data_cls.from_epw(path),get_path=lambda: get_input_file_path(self.dir_path, FILE_REFS.epw)),FILE_REFS.eio: FileInfo(constructor=lambda path: self._eio_cls(path),get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.eio)),FILE_REFS.eso: FileInfo(constructor=lambda path: self._standard_output_cls(path),get_path=lambda: get_output_file_path(self.dir_path,FILE_REFS.eso)),FILE_REFS.mtr: FileInfo(constructor=lambda path: self._standard_output_cls(path),get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mtr)),FILE_REFS.mtd: FileInfo(constructor=lambda path: self._mtd_cls(path),get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mtd)),FILE_REFS.mdd: FileInfo(constructor=lambda path: open(path).read(),get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.mdd)),FILE_REFS.err: 
FileInfo(constructor=lambda path: self._err_cls(path),get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.err)),FILE_REFS.summary_table: FileInfo(constructor=lambda path: self._summary_table_cls(path),get_path=lambda: get_output_file_path(self.dir_path, FILE_REFS.summary_table))}return self._prepared_file_refs", "docstring": "Defined here so that we can use the class variables, in order to subclass in oplusplus", "id": "f1904:c1:m4"} {"signature": "def get_bounds(self):", "body": "start, end = None, Noneif len(self._weather_series) == :return start, endfor i in (, -):if self.has_tuple_instants:row = self._weather_series.iloc[i, :]instant = dt.datetime(row[\"\"], row[\"\"], row[\"\"], row[\"\"], row[\"\"])else:instant = self._weather_series.index[i].to_pydatetime()if i == :start = instantelse:end = instantreturn start, end", "docstring": "Returns\n-------\n(start, end)\n\nDatetime instants of beginning and end of data. If no data, will be: (None, None).", "id": "f1910:c0:m8"} {"signature": "def _check_and_sanitize_datetime_instants(df):", "body": "if df is None or len(df) == :return dfif not isinstance(df.index, pd.DatetimeIndex):raise ValueError(\"\")if df.index.freq != \"\":forced_df = df.asfreq(\"\")try:assert_index_equal(df.index, forced_df.index)except AssertionError:raise ValueError(f\"\"f\"\") from Nonedf = forced_dfif df.index[].minute != :raise ValueError(\"\")return df", "docstring": "Parameters\n----------\ndf\n\nReturns\n-------\nsanitized df", "id": "f1910:m2"} {"signature": "def get_data(self, simulation_step=None, error_category=None):", "body": "if simulation_step is None and error_category is None:return self._df.dropna(axis=\"\", how=\"\")if simulation_step is not None:if simulation_step not in self._simulation_step_list:raise RuntimeError(\"\" % simulation_step)if error_category is not None:if error_category not in self.CATEGORIES:raise RuntimeError(\"\" % error_category)iterables = [simulation_step, error_category]columns = pd.MultiIndex.from_product(iterables)series = self._df[simulation_step][error_category].dropna(axis=\"\", how=\"\")df = pd.DataFrame(index=series.index, columns=columns)df[simulation_step] = seriesreturn dfreturn self._df[simulation_step].dropna(axis=\"\", how=\"\")if error_category is not None:if error_category not in self.CATEGORIES:raise RuntimeError(\"\" % error_category)df = self._df.copy()df.columns = df.columns.swaplevel(, )return df[error_category].dropna(axis=\"\", how=\"\")", "docstring": "Parameters\n----------\nsimulation_step: if not given, returns a raw report\nerror_category: if only one argument is specified, swaps dataframe report", "id": "f1921:c0:m3"} {"signature": "def delete(self):", "body": "self.select().delete()", "docstring": "Deletes all records of table.", "id": "f1924:c0:m15"} {"signature": "def __getitem__(self, item):", "body": "if isinstance(item, str):if self._dev_auto_pk:raise KeyError(f\"\")try:return self._records[item]except KeyError:raise KeyError(f\"\")if isinstance(item, int):return self.select()[item] raise KeyError(\"\")", "docstring": "Parameters\n----------\nitem: str or int\n if str: value of record name. 
If table does not have a name field, raises a KeyError\n if int: record position (records are ordered by their content, not by creation order)\n\nReturns\n-------\nRecord instance", "id": "f1924:c0:m6"} {"signature": "def get_pointing_records(self):", "body": "return self.get_epm()._dev_relations_manager.get_pointing_on(self)", "docstring": "Returns\n-------\nMultiTableQueryset of all records pointed by record.", "id": "f1925:c0:m27"} {"signature": "def clear_extensible_fields(self):", "body": "if not self.is_extensible():raise TypeError(\"\")cycle_start, cycle_len, patterns = self.get_extensible_info()return [self.get_serialized_value(i) for i in range(cycle_start, len(self))]", "docstring": "Returns\n-------\nlist of cleared fields (serialized)", "id": "f1925:c0:m35"} {"signature": "def __init__(self, table, data=None):", "body": "self._table = table self._data = {}self._initialized = Trueif data is not None:self._update_inert(data)", "docstring": "Parameters\n----------\ntable\ndata: dict, default {}\n key: index_or_ref, value: raw value or value", "id": "f1925:c0:m0"} {"signature": "def __getattr__(self, item):", "body": "index = self._table._dev_descriptor.get_field_index(item)return self[index]", "docstring": "Parameters\n----------\nitem: field name", "id": "f1925:c0:m16"} {"signature": "def get_pk(self):", "body": "return id(self) if self._table._dev_auto_pk else self[]", "docstring": "Returns\n-------\nIf record has a name, returns its name, else returns record's python id.", "id": "f1925:c0:m24"} {"signature": "def _update_value_inert(self, index, value):", "body": "field_descriptor = self._table._dev_descriptor.get_field_descriptor(index)value = field_descriptor.deserialize(value, index)if isinstance(value, Link):current_link = self._data.get(index)if current_link is not None:current_link.unregister()if isinstance(value, RecordHook):current_record_hook = self._data.get(index)if current_record_hook is not None:current_record_hook.unregister()if isinstance(value, ExternalFile):current_external_file = self._data.get(index)if current_external_file is not None:current_external_file._dev_unregister()if value in (None, NONE_RECORD_HOOK, NONE_LINK, NONE_EXTERNAL_FILE):self._dev_set_none_without_unregistering(index, check_not_required=False)returnold_hook = Noneif index == and not self._table._dev_auto_pk:old_hook = self._data.get() self._data[index] = valueif old_hook is not None:self._table._dev_record_pk_was_updated(old_hook.target_value)", "docstring": "is only called by _update_inert", "id": "f1925:c0:m3"} {"signature": "def __getitem__(self, item):", "body": "item = self._field_key_to_index(item)if item > len(self):raise IndexError(\"\")value = self._data.get(item)if isinstance(value, RecordHook):return value.target_valueif isinstance(value, Link):return value.target_recordreturn value", "docstring": "Parameters\n----------\nitem: field lowercase name or index\n\nReturns\n-------\nField value", "id": "f1925:c0:m14"} {"signature": "def parse_idf(file_like):", "body": "tables_data = {}head_comment = \"\"record_data = Nonemake_new_record = Truecopyright_list = get_multi_line_copyright_message().split(\"\")for i, raw_line in enumerate(file_like):try:copyright_line = copyright_list[i]if raw_line.strip() == copyright_line:continueexcept IndexError:passsplit_line = raw_line.split(\"\")if len(split_line) == :if len(split_line[].strip()) == :content, comment = None, Noneelse:content, comment = split_line[].strip(), Noneelse:if len(split_line[].strip()) == :content, comment = None, 
\"\".join(split_line[:])else:content, comment = split_line[].strip(), \"\".join(split_line[:])if (content, comment) == (None, None):continueif not content:if record_data is None: head_comment += comment.strip() + \"\"continuerecord_end = content[-] == \"\"content = content[:-] content_l = [text.strip() for text in content.split(\"\")]if make_new_record:table_ref = table_name_to_ref(content_l[].strip())if table_ref.lower() in (\"\",\"\",\"\",\"\"):continueif table_ref not in tables_data:tables_data[table_ref] = []record_data = dict()tables_data[table_ref].append(record_data)content_l = content_l[:]make_new_record = Falsefor value_s in content_l:field_index = len(record_data)record_data[field_index] = value_sif record_end:make_new_record = Truetables_data[\"\"] = head_commentreturn tables_data", "docstring": "Records are created from string.\nThey are not attached to idf yet.\nin idf: header comment, chapter comments, records\nin record: head comment, field comments, tail comment", "id": "f1926:m0"} {"signature": "def to_json(self, buffer_or_path=None, indent=):", "body": "return json_data_to_json(self.to_json_data(),buffer_or_path=buffer_or_path,indent=indent)", "docstring": "Parameters\n----------\nbuffer_or_path: buffer or path, default None\n output to write into. If None, will return a json string.\nindent: int, default 2\n Defines the indentation of the json\n\nReturns\n-------\nNone, or a json string (if buffer_or_path is None).", "id": "f1929:c0:m19"} {"signature": "def dump_external_files(self, target_dir_path=None):", "body": "self._dev_external_files_manager.dump_external_files(target_dir_path=target_dir_path)", "docstring": "Parameters\n----------\ntarget_dir_path", "id": "f1929:c0:m14"} {"signature": "def _dev_populate_from_json_data(self, json_data):", "body": "comment = json_data.pop(\"\", None)if comment is not None:self._comment = commentexternal_files_data = json_data.pop(\"\", dict())self._dev_external_files_manager.populate_from_json_data(external_files_data)added_records = []for table_ref, json_data_records in json_data.items():table = getattr(self, table_ref)records = table._dev_add_inert(json_data_records)added_records.extend(records)for r in added_records:r._dev_activate_hooks()for r in added_records:r._dev_activate_links()r._dev_activate_external_files()", "docstring": "!! Must only be called once, when empty !!", "id": "f1929:c0:m2"} {"signature": "@classmethoddef from_json(cls, buffer_or_path, check_required=True, idd_or_buffer_or_path=None):", "body": "return cls._create_from_buffer_or_path(json.load,buffer_or_path,idd_or_buffer_or_path=idd_or_buffer_or_path,check_required=check_required)", "docstring": "Parameters\n----------\nbuffer_or_path: json buffer or path\ncheck_required: boolean, default True\n If True, will raise an exception if a required field is missing. 
If False, will not perform any checks.\nidd_or_buffer_or_path: (expert) to load using a custom idd\n\nReturns\n-------\nAn Epm instance.", "id": "f1929:c0:m17"} {"signature": "def register_link(self, link):", "body": "keys = tuple((ref, link.initial_hook_value) for ref in link.hook_references)for k in keys:if k in self._record_hooks:link.set_target(target_record=self._record_hooks[k].target_record)breakelse:for k in keys:if k in self._table_hooks:link.set_target(target_table=self._table_hooks[k])breakelse:field_descriptor = link.source_record.get_field_descriptor(link.source_index)raise FieldValidationError(f\"\"f\"\")if link.source_record not in self._links_by_source:self._links_by_source[link.source_record] = set()self._links_by_source[link.source_record].add(link)if link.target not in self._links_by_target:self._links_by_target[link.target] = set()self._links_by_target[link.target].add(link)", "docstring": "source record and index must have been set", "id": "f1930:c0:m3"} {"signature": "@propertydef detailed_type(self):", "body": "if self._detailed_type is None:if (\"\" in self.tags) or (\"\" in self.tags):self._detailed_type = \"\"elif \"\" in self.tags:self._detailed_type = self.tags[\"\"][].lower() elif \"\" in self.tags:self._detailed_type = \"\"elif \"\" in self.tags:self._detailed_type = \"\"elif \"\" in self.tags:self._detailed_type = \"\"elif self.basic_type == \"\":self._detailed_type = \"\"elif self.basic_type == \"\":self._detailed_type = \"\"else:raise ValueError(\"\")return self._detailed_type", "docstring": "Uses EPlus double approach of type ('type' tag, and/or 'key', 'object-list', 'external-list', 'reference' tags)\nto determine detailed type.\n\nReturns\n-------\n\"integer\", \"real\", \"alpha\", \"choice\", \"reference\", \"object-list\", \"external-list\", \"node\"", "id": "f1931:c0:m7"} {"signature": "def select(self, filter_by=None):", "body": "iterator = self._records if filter_by is None else filter(filter_by, self._records)return Queryset(self._table, iterator)", "docstring": "Parameters\n----------\nfilter_by: callable, default None\n Callable must take one argument (a record of queryset), and return True to keep record, or False to skip it.\n Example : .select(lambda x: x.name == \"my_name\").\n If None, records are not filtered.\n\nReturns\n-------\nQueryset instance, containing all selected records.", "id": "f1935:c0:m9"} {"signature": "def one(self, filter_by=None):", "body": "qs = self if filter_by is None else self.select(filter_by=filter_by)if len(qs) == :raise RecordDoesNotExistError(\"\")if len(qs) > :raise MultipleRecordsReturnedError(\"\")return qs[]", "docstring": "Parameters\n----------\nfilter_by: callable, default None\n Callable must take one argument (a record of table), and return True to keep record, or False to skip it.\n Example : .one(lambda x: x.name == \"my_name\").\n If None, records are not filtered.\n\nReturns\n-------\nRecord instance if one and only one record is found. Else raises.\n\nRaises\n------\nRecordDoesNotExistError if no record is found\nMultipleRecordsReturnedError if multiple records are found", "id": "f1935:c0:m10"} {"signature": "def delete(self):", "body": "for r in self:r.delete()self._records = ()", "docstring": "Deletes all records of queryset.", "id": "f1935:c0:m11"} {"signature": "def __init__(self, references, index, value):", "body": "self.references = referencesself.target_index = indexself.target_value = valueself.target_record = None", "docstring": "target_value must always be relevant : !! 
don't forget to deactivate hook if field of record changes !!", "id": "f1936:c0:m0"} {"signature": "def __init__(self, category):", "body": "self.category = categoryself.text = \"\"", "docstring": "Parameters\n----------\ncategory: str,\n code, markdown", "id": "f1953:c0:m0"} {"signature": "@classmethoddef load_from_file_like(cls, flo, format=None):", "body": "format = self.format if format is None else formatload = getattr(cls, \"\" % format, None)if load is None:raise ValueError(\"\" % format)return load(flo)", "docstring": "Load the object from a given file-like object with the given\n protocol.", "id": "f1957:c0:m1"} {"signature": "def windows(iterable, length=, overlap=, padding=True):", "body": "it = iter(iterable)results = list(itertools.islice(it, length))while len(results) == length:yield resultsresults = results[length-overlap:]results.extend(itertools.islice(it, length-overlap))if padding and results:results.extend(itertools.repeat(None, length-len(results)))yield results", "docstring": "Code snippet from Python Cookbook, 2nd Edition by David Ascher,\n Alex Martelli and Anna Ravenscroft; O'Reilly 2005\n\n Problem: You have an iterable s and need to make another iterable whose\n items are sublists (i.e., sliding windows), each of the same given length,\n over s' items, with successive windows overlapping by a specified amount.", "id": "f1958:m2"} {"signature": "def _vp_default(self):", "body": "vp = Viewport(component=self.component)vp.enable_zoom=Truevp.view_position = [-, -]vp.tools.append(ViewportPanTool(vp))return vp", "docstring": "Trait initialiser.", "id": "f1959:c0:m9"} {"signature": "def _default_edge_default(self):", "body": "return Edge(\"\", \"\")", "docstring": "Trait initialiser.", "id": "f1959:c0:m5"} {"signature": "def create(self, prog=None, format=None):", "body": "prog = self.program if prog is None else progformat = self.format if format is None else formattmp_fd, tmp_name = tempfile.mkstemp()os.close( tmp_fd )dot_fd = file( tmp_name, \"\" )self.save_dot( dot_fd )dot_fd.close()tmp_dir = os.path.dirname( tmp_name )p = subprocess.Popen(( self.programs[ prog ], ''+format, tmp_name ),cwd=tmp_dir,stderr=subprocess.PIPE, stdout=subprocess.PIPE)stderr = p.stderrstdout = p.stdoutstdout_output = list()while True:data = stdout.read()if not data:breakstdout_output.append(data)stdout.close()if stdout_output:stdout_output = ''.join(stdout_output)if not stderr.closed:stderr_output = list()while True:data = stderr.read()if not data:breakstderr_output.append(data)stderr.close()if stderr_output:stderr_output = ''.join(stderr_output)status = p.wait()if status != :logger.error(\"\"\"\" % ( status, stderr_output ) )elif stderr_output:logger.error( \"\", stderr_output )os.unlink(tmp_name)return stdout_output", "docstring": "Creates and returns a representation of the graph using the\n Graphviz layout program given by 'prog', according to the given\n format.\n\n Writes the graph to a temporary dot file and processes it with\n the program given by 'prog' (which defaults to 'dot'), reading\n the output and returning it as a string if the operation is\n successful. 
On failure None is returned.", "id": "f1959:c0:m15"} {"signature": "def __iter__(self):", "body": "for each in self.nodes:yield each", "docstring": "Return a iterator passing through all nodes in the graph.\n\n @rtype: iterator\n @return: Iterator passing through all nodes in the graph.", "id": "f1959:c0:m1"} {"signature": "def _component_default(self):", "body": "return Container(draw_axes=True, fit_window=False, auto_size=True)", "docstring": "Trait initialiser.", "id": "f1959:c0:m8"} {"signature": "def save_as(self):", "body": "pass", "docstring": "Saves the editor content to a new file name.", "id": "f1962:c0:m5"} {"signature": "def _action_sets_default(self):", "body": "from action import GodotWorkbenchActionSetreturn [GodotWorkbenchActionSet]", "docstring": "Trait initialiser.", "id": "f1963:c0:m2"} {"signature": "def _editor_input_default(self):", "body": "return self.obj", "docstring": "Trait initialiser.", "id": "f1965:c0:m2"} {"signature": "def _name_default(self):", "body": "self.obj.on_trait_change(self.on_path, \"\")return basename(self.obj.path)", "docstring": "Trait initialiser.", "id": "f1965:c0:m0"} {"signature": "def normal_left_down(self, event):", "body": "print(\"\" % (event.x, event.y))", "docstring": "Handles left mouse button clicks in 'normal' mode", "id": "f1968:c0:m3"} {"signature": "def _draw_mainlayer(self, gc, view_bounds=None, mode=\"\"):", "body": "x_origin = self.x_originy_origin = self.y_origingc.save_state()try:self._draw_bounds(gc)", "docstring": "Draws the component", "id": "f1968:c0:m0"} {"signature": "def is_in(self, point_x, point_y):", "body": "x = self.x_originy = self.y_origina = self.e_widthb = self.e_heightreturn ((point_x-x)**/(a**)) + ((point_y-y)**/(b**)) < ", "docstring": "Test if the point is within this ellipse", "id": "f1968:c0:m1"} {"signature": "def _anytrait_changed_for_component(self, new):", "body": "self.canvas.request_redraw()", "docstring": "Handles redrawing of the canvas.", "id": "f1972:c0:m3"} {"signature": "def cbrt(x):", "body": "if x >= :return pow(x, /)else:return -pow(abs(x), /)", "docstring": "cbrt(x) = x^{1/3}, if x >= 0\n = -|x|^{1/3}, if x < 0", "id": "f1975:m0"} {"signature": "def cubic(a, b, c, d=None):", "body": "if d: a, b, c = b / float(a), c / float(a), d / float(a)t = a / p, q = b - * t**, c - b * t + * t**u, v = quadratic(q, -(p/)**)if type(u) == type(): r, w = polar(u.real, u.imag)y1 = * cbrt(r) * cos(w / )else: y1 = cbrt(u) + cbrt(v)y2, y3 = quadratic(y1, p + y1**)return y1 - t, y2 - t, y3 - t", "docstring": "x^3 + ax^2 + bx + c = 0 (or ax^3 + bx^2 + cx + d = 0)\n With substitution x = y-t and t = a/3, the cubic equation reduces to\n y^3 + py + q = 0,\n where p = b-3t^2 and q = c-bt+2t^3. Then, one real root y1 = u+v can\n be determined by solving\n w^2 + qw - (p/3)^3 = 0\n where w = u^3, v^3. 
From Vieta's theorem,\n y1 + y2 + y3 = 0\n y1 y2 + y1 y3 + y2 y3 = p\n y1 y2 y3 = -q,\n the other two (real or complex) roots can be obtained by solving\n y^2 + (y1)y + (p+y1^2) = 0", "id": "f1975:m3"} {"signature": "def polar(x, y, deg=): ", "body": "if deg:return hypot(x, y), * atan2(y, x) / pielse:return hypot(x, y), atan2(y, x)", "docstring": "Convert from rectangular (x,y) to polar (r,w)\n r = sqrt(x^2 + y^2)\n w = arctan(y/x) = [-\\pi,\\pi] = [-180,180]", "id": "f1975:m1"} {"signature": "def _parse_xdot_directive(self, name, new):", "body": "parser = XdotAttrParser()components = parser.parse_xdot_data(new)x1 = min( [c.x for c in components] )y1 = min( [c.y for c in components] )print(\"\", name, x1, y1)move_to_origin( components )for c in components:if isinstance(c, Ellipse):component.x_origin -= x1component.y_origin -= y1c.position = [ c.x - x1, c.y - y1 ]elif isinstance(c, (Polygon, BSpline)):print(\"\", c.points)c.points = [ (t[] - x1, t[] - y1) for t in c.points ]print(\"\", c.points)elif isinstance(c, Text):font = str_to_font( str(c.pen.font) )", "docstring": "Handles parsing Xdot drawing directives.", "id": "f1976:c0:m6"} {"signature": "def __str__(self):", "body": "attrs = []for trait_name, trait in self.traits(graphviz=True).items():value = getattr(self, trait_name)if (value != trait.default) and (trait.default is not None):if isinstance( value, str ):valstr = '' % valueelse:valstr = str( value )attrs.append('' % (trait_name, valstr))if attrs:attrstr = \"\" % \"\".join(attrs)else:attrstr = \"\"edge_str = \"\" % ( self.tail_node.ID, self.tailport,self.conn,self.head_node.ID, self.headport,attrstr )return edge_str", "docstring": "Returns a string representation of the edge.", "id": "f1976:c0:m1"} {"signature": "@on_trait_change(\"\")def arrange_all(self):", "body": "import godot.dot_data_parserimport godot.graphgraph = godot.graph.Graph( ID=\"\", directed=True )self.conn = \"\"graph.edges.append( self )xdot_data = graph.create( format=\"\" )print \"\", xdot_dataparser = godot.dot_data_parser.GodotDataParser()ndata = xdot_data.replace('','')tokens = parser.dotparser.parseString(ndata)[]for element in tokens[]:cmd = element[]if cmd == \"\":cmd, src, dest, opts = elementself.set( **opts )", "docstring": "Arrange the components of the node using Graphviz.", "id": "f1976:c0:m5"} {"signature": "def _vp_default(self):", "body": "vp = Viewport( component=self.component )vp.enable_zoom=Truevp.tools.append( ViewportPanTool(vp) )return vp", "docstring": "Trait initialiser.", "id": "f1976:c0:m3"} {"signature": "def setUp(self):", "body": "node1 = DomainNode(name=\"\")node2 = DomainNode(name=\"\")edge1 = DomainEdge(source=node1, target=node2)model = DomainModel(nodes=[node1, node2], edges=[edge1])self.view_model = DomainViewModel(model=model)", "docstring": "Prepares the test fixture before each test method is called.", "id": "f1978:c6:m0"} {"signature": "def setUp(self):", "body": "self.mapping = Mapping(nodes=[NodeMapping(containment_trait=\"\", element=DomainNode,dot_node=DotGraphNode(shape=NODE_SHAPE),tools=[MoveTool, ElementTool]),NodeMapping(containment_trait=\"\", element=OtherNode,dot_node=DotGraphNode(shape=OTHER_NODE_SHAPE),tools=[MoveTool, ElementTool])])node1 = DomainNode(name=\"\")node2 = DomainNode(name=\"\")self.model = DomainModel(nodes=[node1, node2],edges=[DomainEdge(source=node1, target=node2)])return", "docstring": "Prepares the test fixture before each test method is called.", "id": "f1981:c5:m0"} {"signature": "def build_graph(self, graph, tokens):", "body": "subgraph 
= Nonefor element in tokens:cmd = element[]if cmd == ADD_NODE:cmd, nodename, opts = elementgraph.add_node(nodename, **opts)elif cmd == ADD_EDGE:cmd, src, dest, opts = elementsrcport = destport = \"\"if isinstance(src,tuple):srcport = src[]src = src[]if isinstance(dest,tuple):destport = dest[]dest = dest[]graph.add_edge(src, dest, tailport=srcport, headport=destport,**opts)elif cmd in [ADD_GRAPH_TO_NODE_EDGE,ADD_GRAPH_TO_GRAPH_EDGE,ADD_NODE_TO_GRAPH_EDGE]:cmd, src, dest, opts = elementsrcport = destport = \"\"if isinstance(src,tuple):srcport = src[]if isinstance(dest,tuple):destport = dest[]if not (cmd == ADD_NODE_TO_GRAPH_EDGE):if cmd == ADD_GRAPH_TO_NODE_EDGE:src = subgraphelse:src = prev_subgraphdest = subgraphelse:dest = subgraphsrc_is_graph = isinstance(src, (Subgraph, Cluster))dst_is_graph = isinstance(dst, (Subgraph, Cluster))if src_is_graph:src_nodes = src.nodeselse:src_nodes = [src]if dst_is_graph:dst_nodes = dst.nodeselse:dst_nodes = [dst]for src_node in src_nodes:for dst_node in dst_nodes:graph.add_edge(from_node=src_node, to_node=dst_node,tailport=srcport, headport=destport,**kwds)elif cmd == SET_GRAPH_ATTR:graph.set( **element[] )elif cmd == SET_DEF_NODE_ATTR:graph.default_node.set( **element[] )elif cmd == SET_DEF_EDGE_ATTR:graph.default_edge.set( **element[] )elif cmd == SET_DEF_GRAPH_ATTR:graph.default_graph.set( **element[] )elif cmd == ADD_SUBGRAPH:cmd, name, elements = elementif subgraph:prev_subgraph = subgraphif name.startswith(\"\"):cluster = Cluster(ID=name)cluster = self.build_graph(cluster, elements)graph.add_cluster(cluster)else:subgraph = Subgraph(ID=name)subgraph = self.build_graph(subgraph, elements)graph.add_subgraph(subgraph)return graph", "docstring": "Builds a Godot graph.", "id": "f1983:c0:m4"} {"signature": "def save_as(self, info):", "body": "if not info.initialized:return", "docstring": "Handles saving the current model to file.", "id": "f1988:c0:m6"} {"signature": "def configure_edges(self, info):", "body": "if info.initialized:self.model.edit_traits(parent=info.ui.control,kind=\"\", view=edges_view)", "docstring": "Handles display of the edges editor.", "id": "f1988:c0:m9"} {"signature": "def save(self, info):", "body": "save_file = self.save_fileif not isfile(save_file):self.save_as(info)else:fd = Nonetry:fd = open(save_file, \"\")dot_code = str(self.model)fd.write(dot_code)finally:if fd is not None:fd.close()", "docstring": "Handles saving the current model to the last file.", "id": "f1988:c0:m5"} {"signature": "def on_exit(self, info):", "body": "if self.prompt_on_exit:retval = confirm(parent = info.ui.control,message = \"\",title = \"\",default = YES)if retval == YES:self._on_close( info )else:self._on_close( info )", "docstring": "Handles the user attempting to exit Godot.", "id": "f1988:c0:m19"} {"signature": "def add_subgraph(self, info):", "body": "if not info.initialized:returngraph = self._request_graph(info.ui.control)if graph is not None:subgraph = Subgraph()retval = subgraph.edit_traits(parent = info.ui.control,kind = \"\")if retval.result:graph.subgraphs.append(subgraph)", "docstring": "Handles adding a Subgraph to the main graph.", "id": "f1988:c0:m13"} {"signature": "def _edges_replaced(self, object, name, old, new):", "body": "self._delete_edges(old)self._add_edges(new)", "docstring": "Handles a list of edges being set.", "id": "f1989:c3:m9"} {"signature": "def _delete_nodes(self, features):", "body": "graph = self._graphif graph is not None:for feature in features:graph.delete_node( id(feature) )graph.arrange_all()", "docstring": 
"Removes the node corresponding to each item in 'features'.", "id": "f1989:c3:m8"} {"signature": "def get_label ( self, object ):", "body": "label = self.labelif label[:] == '':return label[:]label = xgetattr( object, label, '' )if self.formatter is None:return labelreturn self.formatter( object, label )", "docstring": "Gets the label to display for a specified object.", "id": "f1989:c1:m0"} {"signature": "def _nodes_replaced(self, object, name, old, new):", "body": "self._delete_nodes(old)self._add_nodes(new)", "docstring": "Handles a list of nodes being set.", "id": "f1989:c3:m5"} {"signature": "def _edges_changed(self, object, name, undefined, event):", "body": "self._delete_edges(event.removed)self._add_edges(event.added)", "docstring": "Handles addition and removal of edges.", "id": "f1989:c3:m10"} {"signature": "def delete_child ( self, object, index ):", "body": "if isinstance( child, Subgraph ):object.subgraphs.pop(index)elif isinstance( child, Cluster ):object.clusters.pop( index )elif isinstance( child, Node ):object.nodes.pop( index )elif isinstance( child, Edge ):object.edges.pop( index )else:pass", "docstring": "Deletes a child at a specified index from the object's children.", "id": "f1991:c0:m4"} {"signature": "def allows_children ( self, object ):", "body": "return True", "docstring": "Returns whether this object can have children.", "id": "f1991:c0:m0"} {"signature": "def get_node(self, ID):", "body": "node = super(Graph, self).get_node(ID)if node is not None:return nodefor graph in self.all_graphs:for each_node in graph.nodes:if each_node.ID == ID:return each_nodeelse:return None", "docstring": "Returns a node given an ID or None if no such node exists.", "id": "f1992:c0:m4"} {"signature": "def _maxiter_default(self):", "body": "mode = self.modeif mode == \"\":return * len(self.nodes)elif mode == \"\":return else:return ", "docstring": "Trait initialiser.", "id": "f1992:c0:m7"} {"signature": "@on_trait_change(\"\")def redraw_canvas(self):", "body": "from xdot_parser import XdotAttrParserxdot_parser = XdotAttrParser()canvas = self._component_default()for node in self.nodes:components = xdot_parser.parse_xdot_data( node._draw_ )canvas.add( *components )components = xdot_parser.parse_xdot_data( node._ldraw_ )canvas.add( *components )for edge in self.edges:components = xdot_parser.parse_xdot_data( edge._draw_ )canvas.add( *components )components = xdot_parser.parse_xdot_data( edge._ldraw_ )canvas.add( *components )components = xdot_parser.parse_xdot_data( edge._hdraw_ )canvas.add( *components )components = xdot_parser.parse_xdot_data( edge._tdraw_ )canvas.add( *components )components = xdot_parser.parse_xdot_data( edge._hldraw_ )canvas.add( *components )components = xdot_parser.parse_xdot_data( edge._tldraw_ )canvas.add( *components )self.component = canvasself.vp.request_redraw()", "docstring": "Parses the Xdot attributes of all graph components and adds\n the components to a new canvas.", "id": "f1992:c0:m3"} {"signature": "def _on_edges(self, object, name, old, new):", "body": "if name == \"\":edges = new.addedelif name == \"\":edges = newelse:edges = []all_nodes = [n for g in self.all_graphs for n in g.nodes]for each_edge in edges:if each_edge.tail_node not in all_nodes:object.nodes.append( each_edge.tail_node )if each_edge.head_node not in all_nodes:object.nodes.append( each_edge.head_node )each_edge._nodes = all_nodes", "docstring": "Handles the list of edges for any graph changing.", "id": "f1992:c0:m11"} {"signature": "def _directed_changed(self, new):", "body": "if 
new:conn = \"\"else:conn = \"\"for edge in [e for g in self.all_graphs for e in g.edges]:edge.conn = conn", "docstring": "Sets the connection string for all edges.", "id": "f1992:c0:m9"} {"signature": "def proc_filled_ellipse(self, tokens):", "body": "return self._proc_ellipse(tokens, filled=True)", "docstring": "Returns the components of a filled ellipse.", "id": "f1994:c0:m8"} {"signature": "def _proc_color(self, tokens):", "body": "keys = list(tokens.keys())if \"\" in keys: rr, gg, bb = tokens[\"\"], tokens[\"\"], tokens[\"\"]hex2int = lambda h: int(h, )if \"\" in keys:a = tokens[\"\"]c = str((hex2int(rr), hex2int(gg), hex2int(bb), hex2int(a)))else:c = str((hex2int(rr), hex2int(gg), hex2int(bb)))elif \"\" in keys: r, g, b = hsv_to_rgb(tokens[\"\"],tokens[\"\"],tokens[\"\"])c = str((int(r*), int(g*), int(b*)))else:c = tokens[\"\"]return c", "docstring": "The color traits of a Pen instance must be a string of the form\n (r,g,b) or (r,g,b,a) where r, g, b, and a are integers from 0 to 255,\n a wx.Colour instance, an integer which in hex is of the form 0xRRGGBB,\n where RR is red, GG is green, and BB is blue or a valid color name.", "id": "f1994:c0:m5"} {"signature": "def proc_stroke_color(self, tokens):", "body": "self.pen.color = self._proc_color(tokens)return []", "docstring": "Sets the pen stroke color.", "id": "f1994:c0:m4"} {"signature": "def parse_xdot_data(self, data):", "body": "parser = self.parserif pyparsing_version >= \"\":parser.parseWithTabs()if data:return parser.parseString(data)else:return []", "docstring": "Parses xdot data and returns the associated components.", "id": "f1994:c0:m1"} {"signature": "def proc_unfilled_polygon(self, tokens):", "body": "return self._proc_polygon(tokens, filled=False)", "docstring": "Returns the components of an unfilled polygon.", "id": "f1994:c0:m12"} {"signature": "def __str__(self):", "body": "attrs = []for trait_name, trait in self.traits(graphviz=True).items():value = getattr(self, trait_name)if value != trait.default:trait = self.trait( trait_name )if trait.is_trait_type( Tuple ):valstr = '' % \"\".join( [str(d) for d in value] )elif isinstance( value, str ):valstr = '' % valueelse:valstr = str( value )attrs.append('' % (trait_name, valstr))if attrs:attrstr = \"\" % \"\".join(attrs)return \"\" % (self.ID, attrstr)else:return \"\" % self.ID", "docstring": "Returns a string representation of the node.", "id": "f1995:c0:m1"} {"signature": "def parse_xdot_drawing_directive(self, new):", "body": "components = XdotAttrParser().parse_xdot_data(new)max_x = max( [c.bounds[] for c in components] + [] )max_y = max( [c.bounds[] for c in components] + [] )pos_x = min( [c.x for c in components] )pos_y = min( [c.y for c in components] )move_to_origin(components)container = Container(auto_size=True,position=[pos_x-self.pos[], pos_y-self.pos[]],bgcolor=\"\")self.bounds = bounds=[max_x, max_y]container = Container(fit_window=False, auto_size=True, bgcolor=\"\")container.add( *components )self.drawing = container", "docstring": "Parses the drawing directive, updating the node components.", "id": "f1995:c0:m7"} {"signature": "def __hash__(self):", "body": "return hash(self.ID)", "docstring": "objects which compare equal have the same hash value.", "id": "f1995:c0:m2"} {"signature": "def _label_drawing_changed(self, old, new):", "body": "if old is not None:self.component.remove(old)if new is not None:self.component.add(new)w, h = self.component.boundsself.component.position = [ self.pos[] - (w/), self.pos[] - (h/) ]self.component.position = list( self.pos 
)self.component.request_redraw()", "docstring": "Handles the container of label components changing.", "id": "f1995:c0:m10"} {"signature": "def map_element(self, obj, name, event):", "body": "canvas = self.diagram.diagram_canvasparser = XDotParser()for element in event.added:logger.debug(\"\" % element)for node_mapping in self.nodes:ct = name[:-] if node_mapping.containment_trait == ct:dot_attrs = node_mapping.dot_nodedot = Dot()graph_node = Node(str(id(element)))self._style_node(graph_node, dot_attrs)dot.add_node(graph_node)xdot = graph_from_dot_data(dot.create(self.program,\"\"))diagram_nodes = parser.parse_nodes(xdot)for dn in diagram_nodes:if dn is not None:dn.element = elementfor tool in node_mapping.tools:dn.tools.append(tool(dn))canvas.add(dn)canvas.request_redraw()for element in event.removed:logger.debug(\"\" % element)for component in canvas.components:if element == component.element:canvas.remove(component)canvas.request_redraw()break", "docstring": "Handles mapping elements to diagram components", "id": "f1996:c4:m4"} {"signature": "def clear_canvas(self):", "body": "logger.debug(\"\")old_canvas = self.diagram_canvaslogger.debug(\"\" % canvas.components)for component in canvas.components:canvas.remove(component)logger.debug(\"\" % canvas.components)for component in canvas.components:canvas.remove(component)logger.debug(\"\" % canvas.components)canvas.request_redraw()new_canvas = Canvas()new_canvas.copy_traits(old_canvas, [\"\", \"\"])self.diagram_canvas = new_canvasself.viewport.component=new_canvasself.viewport.request_redraw()return", "docstring": "Removes all components from the canvas", "id": "f1996:c0:m3"} {"signature": "def _viewport_default(self):", "body": "vp = Viewport(component=self.diagram_canvas, enable_zoom=True)vp.view_position = [,]vp.tools.append(ViewportPanTool(vp))return vp", "docstring": "Trait initialiser", "id": "f1996:c0:m1"} {"signature": "def _diagram_canvas_default(self):", "body": "canvas = Canvas()for tool in self.tools:canvas.tools.append(tool(canvas))return canvas", "docstring": "Trait initialiser", "id": "f1996:c0:m0"} {"signature": "@on_trait_change(\"\")def _diagram_canvas_changed(self, new):", "body": "logger.debug(\"\")canvas = self.diagram_canvasfor tool in self.tools:if canvas is not None:print(\"\" % tool)canvas.tools.append(tool(canvas))", "docstring": "Handles the diagram canvas being set", "id": "f1996:c0:m2"} {"signature": "def __str__(self):", "body": "s = \"\"return \"\" % ( s, super(Cluster, self).__str__() )", "docstring": "Returns a string representation of the cluster in dot language.", "id": "f1997:c0:m0"} {"signature": "def _labelloc_default(self):", "body": "return \"\"", "docstring": "Trait initialiser.", "id": "f1997:c0:m1"} {"signature": "def normal_left_dclick(self, event):", "body": "x = event.xy = event.ycandidates = []component = self.componentif isinstance(component, Container):candidates = get_nested_components(self.component)else:returnitem = Nonefor candidate, offset in candidates:if candidate.is_in(x-offset[], y-offset[]):item = candidatebreakif hasattr(component, \"\"):if component.element is not None:component.active_tool = selfcomponent.element.edit_traits(kind=\"\")event.handled = Truecomponent.active_tool = Nonecomponent.request_redraw()return", "docstring": "Handles the left mouse button being double-clicked when the tool\n is in the 'normal' state.\n\n If the event occurred on this tool's component (or any contained\n component of that component), the method opens a Traits UI view on the\n object referenced 
by the 'element' trait of the component that was\n double-clicked, setting the tool as the active tool for the duration\n of the view.", "id": "f1998:c0:m0"} {"signature": "def normal_right_down(self, event):", "body": "x = event.xy = event.ycandidates = []component = self.componentif isinstance(component, Container):candidates = get_nested_components(self.component)else:returnitem = Nonefor candidate, offset in candidates:if candidate.is_in(x-offset[], y-offset[]):item = candidatebreakfor tool in component.tools:component.active_tool = selfevent.handled = Truecomponent.active_tool = Nonecomponent.request_redraw()return", "docstring": "Handles the right mouse button being clicked when the tool is in\n the 'normal' state.\n\n If the event occurred on this tool's component (or any contained\n component of that component), the method opens a context menu with\n menu items from any tool of the parent component that implements\n the MenuItemTool interface, i.e. has a get_item() method.", "id": "f2000:c1:m0"} {"signature": "def setUp(self):", "body": "self.subcommand = Workspace()self.subcommand_str = \"\"super(TestSubcommandWorkspace, self).setUp()config_data = {\"\": {\"\": {\"\": \"\",\"\": {\"\": \"\"}}}}self.config.update(config_data)", "docstring": "Setup test suite.", "id": "f2006:c0:m0"} {"signature": "def setUp(self):", "body": "self.subcommand = Jump()self.subcommand_str = \"\"super(TestSubcommandJump, self).setUp()", "docstring": "Setup test suite.", "id": "f2008:c0:m0"} {"signature": "def setUp(self):", "body": "super(TestSvn, self).setUp(Svn)", "docstring": "Set up svn adapter and sandbox.", "id": "f2014:c0:m0"} {"signature": "def mkdir(self, directory):", "body": "os.mkdir(os.path.join(self.path, directory))", "docstring": "Create directory in sandbox.", "id": "f2016:c0:m1"} {"signature": "def __init__(self, path=None):", "body": "if path is None:path = os.path.dirname(os.path.realpath(__file__)) + \"\"self.path = pathif os.path.exists(path):self.destroy()os.mkdir(path)", "docstring": "Init sandbox environment.", "id": "f2016:c0:m0"} {"signature": "def setUp(self, adapter):", "body": "super(AdapterTestHelper, self).setUp()self.sandbox.mkdir(\"\")self.adapter = adapter(os.path.join(self.sandbox.path, \"\"))self.adapter.execute = Mock(return_value=None)", "docstring": "Generic setup for adapter test cases.", "id": "f2016:c3:m0"} {"signature": "def assert_config_file_contains(self, config_file, expected):", "body": "file = open(config_file)config = yaml.safe_load(file.read())file.close()self.assertEqual(config, expected)", "docstring": "Custom assert to check content of config_file.", "id": "f2016:c1:m2"} {"signature": "def assert_executed_command(self, expected_cmd, with_path=True):", "body": "if not with_path:self.adapter.execute.assert_called_once_with(expected_cmd)else:self.adapter.execute.assert_called_once_with(expected_cmd,os.path.join(self.sandbox.path, \"\"))", "docstring": "Assert that adapter has executed expected command.", "id": "f2016:c3:m2"} {"signature": "def assert_subcommand_parsing_raises_error(self, commands, error_expected):", "body": "self.subcommand.parse()self.assertRaises(error_expected,self.parser.parse_args, commands)", "docstring": "This method provides a way to assert subcommand parsing.", "id": "f2016:c2:m3"} {"signature": "def touch(self, file):", "body": "full_path = os.path.join(self.path, file)with open(full_path, ''):os.utime(full_path, None)", "docstring": "Create file in sandbox.", "id": "f2016:c0:m2"} {"signature": "def setUp(self):", "body": 
"super(SubcommandTestHelper, self).setUp()self.parser = argparse.ArgumentParser(prog=\"\")self.subparser = self.parser.add_subparsers(dest=\"\")self.config = Config(self.sandbox.path + \"\")if self.subcommand is not None and self.subcommand_str is not None:self.subcommand.setup(self.subcommand_str, self.config, self.subparser)", "docstring": "Setup test suite.", "id": "f2016:c2:m0"} {"signature": "def write(self):", "body": "file = open(self.config_file, \"\")file.write(yaml.dump(dict(self), default_flow_style=False))file.close()", "docstring": "Write config in configuration file.\nData must me a dict.", "id": "f2020:c0:m4"} {"signature": "def __init__(self, config_file, *args, **kwargs):", "body": "self.config_file = config_filesuper(Config, self).__init__(*args, **kwargs)if not os.path.exists(config_file) or config_file is None:self.update({\"\": {}})self.write()else:file = open(self.config_file)config = yaml.safe_load(file.read())file.close()if config is not None:self.update(config)", "docstring": "Workspace object initialization.", "id": "f2020:c0:m0"} {"signature": "def execute(self, args):", "body": "if args.name is not None:self.print_workspace(args.name)elif args.all is not None:self.print_all()", "docstring": "Execute update subcommand.", "id": "f2023:c0:m2"} {"signature": "def parse(self):", "body": "parser = self.subparser.add_parser(\"\",help=\"\",description=\"\")group = parser.add_mutually_exclusive_group(required=True)group.add_argument('', action='', help=\"\")group.add_argument('', type=str, help=\"\", nargs='')", "docstring": "Parse update subcommand.", "id": "f2023:c0:m1"} {"signature": "def print_workspace(self, name):", "body": "path_list = find_path(name, self.config)if len(path_list) == :self.logger.error(\"\" % name)return Falsefor name, path in path_list.items():self.print_update(name, path)", "docstring": "Print workspace update.", "id": "f2023:c0:m4"} {"signature": "def __init__(self, name, subparser, config):", "body": "self.name = nameself.parser = subparser.add_parser(name,description=\"\" % name)self.config = config", "docstring": "Initialize workspace name.", "id": "f2025:c1:m0"} {"signature": "def show_all(self):", "body": "for ws in self.workspace.list().keys():self.show_workspace(ws)print(\"\")", "docstring": "Show details for all workspaces.", "id": "f2026:c0:m4"} {"signature": "def parse(self):", "body": "parser = self.subparser.add_parser(\"\",help=\"\",description=\"\")group = parser.add_mutually_exclusive_group(required=True)group.add_argument('', action='', help=\"\")group.add_argument('', type=str, help=\"\", nargs='')", "docstring": "Parse show subcommand.", "id": "f2026:c0:m1"} {"signature": "def print_workspace(self, name):", "body": "path_list = find_path(name, self.config)if len(path_list) == :self.logger.error(\"\" % name)return Falsefor name, path in path_list.items():self.print_status(name, path)", "docstring": "Print workspace status.", "id": "f2027:c0:m4"} {"signature": "def get_version():", "body": "requirement = pkg_resources.Requirement.parse(\"\")provider = pkg_resources.get_provider(requirement)return provider.version", "docstring": "Get version from package resources.", "id": "f2028:m0"} {"signature": "def clone(self, url):", "body": "return self.execute(\"\" % (self.executable,url, self.path))", "docstring": "Clone repository from url.", "id": "f2030:c0:m2"} {"signature": "def clone(self, url):", "body": "return self.execute(\"\" % (url, self.path))", "docstring": "Clone repository from url.", "id": "f2031:c0:m1"} {"signature": 
"def clone(self, url):", "body": "return self.execute(\"\" % (self.executable,url, self.path))", "docstring": "Clone repository from url.", "id": "f2032:c0:m2"} {"signature": "def update(self):", "body": "return self.exec_on_path(\"\" % self.executable)", "docstring": "Update repository.", "id": "f2032:c0:m1"} {"signature": "def clone(self, url):", "body": "return self.execute(\"\" % (url, self.path))", "docstring": "Checkout repository from url.", "id": "f2035:c0:m1"} {"signature": "def get(self, name):", "body": "ws_list = self.list()return ws_list[name] if name in ws_list else None", "docstring": "Get workspace infos from name.\nReturn None if workspace doesn't exists.", "id": "f2038:c0:m4"} {"signature": "def simple_func1():", "body": "pass", "docstring": "Example:\n >>> pass", "id": "f2045:m0"} {"signature": "@classmethoddef method1(cls):", "body": "pass", "docstring": "Example:\n >>> pass", "id": "f2045:c1:m1"} {"signature": "def multiple_eval_for_loops_v2():", "body": "", "docstring": "However, xdoctest can handle this as long as you print to stdout\n\n>>> for i in range(2):\n... print('%s' % i)\n...\n0\n1", "id": "f2056:m5"} {"signature": "def multiline_madness():", "body": "pass", "docstring": ">>> if True:\n>>> print('doctest requires a special ... prefix')\ndoctest requires a special ... prefix", "id": "f2056:m0"} {"signature": "def embeded_triple_quotes():", "body": "pass", "docstring": ">>> x = '''\n xdoctest is good at dealing with triple quoted strings\n you don't even need to have the >>> prefix, because the\n AST knows you are in a string context\n '''\n>>> print(x)\nxdoctest is good at dealing with triple quoted strings\nyou don't even need to have the >>> prefix, because the\nAST knows you are in a string context", "id": "f2056:m1"} {"signature": "def demo2():", "body": "pass", "docstring": "CommandLine:\n xdoctest -m ~/code/xdoctest/dev/demo_errors.py demo2\n\nExample:\n >>> print('error on different line')\n >>> raise Exception('demo2')", "id": "f2058:m1"} {"signature": "def modify_conf():", "body": "import redbaronimport ubelt as ubconf_path = ''source = ub.readfrom(conf_path)red = redbaron.RedBaron(source)extra_extensions = ['']ext_node = red.find('', value='').parentext_node.value.value.extend(extra_extensions)theme_node = red.find('', value='').parenttheme_node.value.value = ''ub.writeto(conf_path, red.dumps())", "docstring": "pip install redbaron", "id": "f2059:m1"} {"signature": "def get_stack_frame(n=, strict=True):", "body": "frame_cur = inspect.currentframe()for ix in range(n + ):frame_next = frame_cur.f_backif frame_next is None: if strict:raise AssertionError('' % ix)else:breakframe_cur = frame_nextreturn frame_cur", "docstring": "Gets the current stack frame or any of its ancestors dynamically\n\nArgs:\n n (int): n=0 means the frame you called this function in.\n n=1 is the parent frame.\n strict (bool): (default = True)\n\nReturns:\n frame: frame_cur\n\nExample:\n >>> frame_cur = get_stack_frame(n=0)\n >>> print('frame_cur = %r' % (frame_cur,))\n >>> assert frame_cur.f_globals['frame_cur'] is frame_cur", "id": "f2060:m1"} {"signature": "def parse_dynamic_calldefs(modpath=None):", "body": "from xdoctest import static_analysis as staticfrom xdoctest import utils module = utils.import_module_from_path(modpath)calldefs = {}if getattr(module, ''):calldefs[''] = static.CallDefNode(callname='',docstr=module.__doc__,lineno=,doclineno=,doclineno_end=,args=None)for key, val in iter_module_doctestables(module):if hasattr(val, '') and hasattr(val, ''):calldefs[key] = 
static.CallDefNode(callname=val.__name__,docstr=val.__doc__,lineno=,doclineno=,doclineno_end=,args=None)return calldefs", "docstring": "Dynamic parsing of module doctestable items.\n\nWhile this does execute module code it is needed for testing extension\nlibraries.\n\nCommandLine:\n python -m xdoctest.dynamic_analysis parse_dynamic_calldefs\n\nExample:\n >>> from xdoctest import dynamic_analysis\n >>> module = dynamic_analysis\n >>> calldefs = parse_dynamic_calldefs(module.__file__)\n >>> for key, calldef in sorted(calldefs.items()):\n ... print('key = {!r}'.format(key))\n ... print(' * calldef.callname = {}'.format(calldef.callname))\n ... if calldef.docstr is None:\n ... print(' * len(calldef.docstr) = {}'.format(calldef.docstr))\n ... else:\n ... print(' * len(calldef.docstr) = {}'.format(len(calldef.docstr)))", "id": "f2060:m0"} {"signature": "def is_defined_by_module(item, module):", "body": "from xdoctest import static_analysis as statictarget_modname = module.__name__flag = Falseif isinstance(item, types.ModuleType):if not hasattr(item, ''):try:name = static.modpath_to_modname(module.__file__)flag = name in str(item)except:flag = Falseelse:item_modpath = os.path.realpath(os.path.dirname(item.__file__))mod_fpath = module.__file__.replace('', '')if not mod_fpath.endswith(''):flag = Falseelse:modpath = os.path.realpath(os.path.dirname(mod_fpath))modpath = modpath.replace('', '')flag = item_modpath.startswith(modpath)else:if isinstance(item, property):item = item.fgetif isinstance(item, staticmethod):item = item.__func__if isinstance(item, classmethod):item = item.__func__if getattr(item, '', None) == target_modname:flag = Trueelif hasattr(item, ''):parent = item.__objclass__if getattr(parent, '', None) == target_modname:flag = Trueif not flag:try:item_modname = _func_globals(item)['']if item_modname == target_modname:flag = Trueexcept AttributeError:passreturn flag", "docstring": "Check if item is directly defined by a module.\n\nThis check may not always work, especially for decorated functions.\n\nExample:\n >>> from xdoctest import dynamic_analysis\n >>> item = dynamic_analysis.is_defined_by_module\n >>> module = dynamic_analysis\n >>> assert is_defined_by_module(item, module)\n >>> item = dynamic_analysis.six\n >>> assert not is_defined_by_module(item, module)\n >>> item = dynamic_analysis.six.print_\n >>> assert not is_defined_by_module(item, module)\n >>> assert not is_defined_by_module(print, module)\n >>> import _ctypes\n >>> item = _ctypes.Array\n >>> module = _ctypes\n >>> assert is_defined_by_module(item, module)\n >>> item = _ctypes.CFuncPtr.restype\n >>> module = _ctypes\n >>> assert is_defined_by_module(item, module)", "id": "f2060:m5"} {"signature": "def check_got_vs_want(want, got_stdout, got_eval=constants.NOT_EVALED,runstate=None):", "body": "if got_eval is constants.NOT_EVALED:got = got_stdoutflag = check_output(got, want, runstate)else:if not got_stdout:got = repr(got_eval)flag = check_output(got, want, runstate)else:got = got_stdoutflag = check_output(got, want, runstate)if not flag:got = repr(got_eval)flag = check_output(got, want, runstate)if not flag:got = got_stdoutif not flag:msg = ''ex = GotWantException(msg, got, want)raise exreturn flag", "docstring": "Determines to check against either got_stdout or got_eval, and then does\nthe comparison.\n\nIf both stdout and eval \"got\" outputs are specified, then the \"want\"\ntarget may match either value.\n\nArgs:\n want (str): target to match against\n got_stdout (str): output from stdout\n got_eval (str): output 
from an eval statement.\n\nRaises:\n GotWantException - If the \"got\" differs from this parts want.", "id": "f2061:m0"} {"signature": "def output_repr_difference(self, runstate=None):", "body": "minimal_got = self.got.rstrip()minimal_want = self.want.rstrip()if runstate is None:runstate = directive.RuntimeState()runstate_ = runstate.to_dict()if not runstate_['']:minimal_want = remove_blankline_marker(minimal_want)lines = [(''),(''.format(minimal_got)),(''.format(minimal_want)),]return ''.join(lines)", "docstring": "Constructs a repr difference with minimal normalization.", "id": "f2061:c0:m3"} {"signature": "def _label_docsrc_lines(self, string):", "body": "def _complete_source(line, state_indent, line_iter):\"\"\"\"\"\"norm_line = line[state_indent:] prefix = norm_line[:]suffix = norm_line[:]assert prefix.strip() in {'', ''}, ''.format(prefix)yield linesource_parts = [suffix]HACK_TRIPLE_QUOTE_FIX = Truetry:while not static.is_balanced_statement(source_parts, only_tokens=True):line_idx, next_line = next(line_iter)norm_line = next_line[state_indent:]prefix = norm_line[:]suffix = norm_line[:]if prefix.strip() not in {'', '', ''}: error = Trueif HACK_TRIPLE_QUOTE_FIX:if any(\"\" in s or '' in s for s in source_parts):next_line = next_line[:state_indent] + '' + norm_linenorm_line = '' + norm_lineprefix = ''suffix = norm_lineerror = Falseif error:if DEBUG:print('')print(''.format(source_parts))print(''.format(prefix))print(''.format(norm_line))print('')raise SyntaxError(''.format(line_idx, next_line))source_parts.append(suffix)yield next_lineexcept StopIteration:if DEBUG:import ubelt as ubprint('')import tracebacktb_text = traceback.format_exc()tb_text = ub.highlight_code(tb_text)tb_text = ub.indent(tb_text)print(tb_text)print(''.format(line_iter))print(''.format(state_indent))print(''.format(line))print('')print(''.format(ub.repr2(source_parts, nl=)))print(ub.codeblock(r''''''))print('')raise IncompleteParseError('''')else:if DEBUG > :import ubelt as ubprint('')print(''.format(line_iter))print(''.format(ub.repr2(source_parts, nl=)))print('')labeled_lines = []state_indent = TEXT = ''DSRC = ''WANT = ''prev_state = TEXTcurr_state = Noneline_iter = enumerate(string.splitlines())def hasprefix(line, prefixes):if not isinstance(prefixes, tuple):prefixes = [prefixes]return any(line == p or line.startswith(p + '') for p in prefixes)for line_idx, line in line_iter:match = INDENT_RE.search(line)line_indent = if match is None else (match.end() - match.start())norm_line = line[state_indent:] strip_line = line.strip()if prev_state == TEXT:if hasprefix(strip_line, ''):curr_state = DSRCelse:curr_state = TEXTelif prev_state == WANT:if len(strip_line) == :curr_state = TEXTelif hasprefix(line.strip(), ''):curr_state = DSRCelif line_indent < state_indent:curr_state = TEXTelse:curr_state = WANTelif prev_state == DSRC: if len(strip_line) == or line_indent < state_indent:curr_state = TEXTelif hasprefix(norm_line, ('', '')):if strip_line == '':curr_state = WANTelse:curr_state = DSRCelse:curr_state = WANTelse: raise AssertionError(''.format(prev_state))if prev_state != curr_state:if curr_state == TEXT:state_indent = if curr_state == DSRC:state_indent = line_indentnorm_line = line[state_indent:]if curr_state == DSRC:try:for part in _complete_source(line, state_indent, line_iter):labeled_lines.append((DSRC, part))except IncompleteParseError as orig_ex:raiseexcept SyntaxError as orig_ex:if DEBUG:print('')print(''.format(line_iter))print(''.format(state_indent))print(''.format(line))print('')print('')for line in 
labeled_lines:print(line)print('')print('')raiseelif curr_state == WANT:labeled_lines.append((WANT, line))elif curr_state == TEXT:labeled_lines.append((TEXT, line))prev_state = curr_stateif DEBUG > :import ubelt as ubprint('')print(''.format(ub.repr2(labeled_lines, nl=)))print('')return labeled_lines", "docstring": "Example:\n >>> from xdoctest.parser import *\n >>> # Having multiline strings in doctests can be nice\n >>> string = utils.codeblock(\n '''\n text\n >>> items = ['also', 'nice', 'to', 'not', 'worry',\n >>> 'about', '...', 'vs', '>>>']\n ... print('but its still allowed')\n but its still allowed\n\n more text\n ''')\n >>> self = DoctestParser()\n >>> labeled = self._label_docsrc_lines(string)\n >>> expected = [\n >>> ('text', 'text'),\n >>> ('dsrc', \">>> items = ['also', 'nice', 'to', 'not', 'worry',\"),\n >>> ('dsrc', \">>> 'about', '...', 'vs', '>>>']\"),\n >>> ('dsrc', \"... print('but its still allowed')\"),\n >>> ('want', 'but its still allowed'),\n >>> ('text', ''),\n >>> ('text', 'more text')\n >>> ]\n >>> assert labeled == expected", "id": "f2062:c1:m7"} {"signature": "def _package_chunk(self, raw_source_lines, raw_want_lines, lineno=):", "body": "if DEBUG > :print('')match = INDENT_RE.search(raw_source_lines[])line_indent = if match is None else (match.end() - match.start())source_lines = [p[line_indent:] for p in raw_source_lines]want_lines = [p[line_indent:] for p in raw_want_lines]exec_source_lines = [p[:] for p in source_lines]if DEBUG > :print('')ps1_linenos, eval_final = self._locate_ps1_linenos(source_lines)if DEBUG > :print('')break_linenos = []ps1_to_directive = {}for s1, s2 in zip(ps1_linenos, ps1_linenos[:] + [None]):lines = exec_source_lines[s1:s2]directives = list(directive.Directive.extract(''.join(lines)))if directives:ps1_to_directive[s1] = directivesbreak_linenos.append(s1)if directives[].inline:if s2 is not None:break_linenos.append(s2)def slice_example(s1, s2, want_lines=None):exec_lines = exec_source_lines[s1:s2]orig_lines = source_lines[s1:s2]directives = ps1_to_directive.get(s1, None)example = doctest_part.DoctestPart(exec_lines,want_lines=want_lines,orig_lines=orig_lines,line_offset=lineno + s1,directives=directives)return examples1 = s2 = if self.simulate_repl:for s1, s2 in zip(ps1_linenos, ps1_linenos[:]):example = slice_example(s1, s2)yield examples1 = s2else:if break_linenos:break_linenos = sorted(set([] + break_linenos))for s1, s2 in zip(break_linenos, break_linenos[:]):example = slice_example(s1, s2)yield examples1 = s2if want_lines and eval_final:s2 = ps1_linenos[-]if s2 != s1: example = slice_example(s1, s2)yield examples1 = s2s2 = Noneexample = slice_example(s1, s2, want_lines)example.use_eval = bool(want_lines) and eval_finalif DEBUG > :print('')yield example", "docstring": "if `self.simulate_repl` is True, then each statement is broken into its\nown part. 
Otherwise, statements are grouped by the closest `want`\nstatement.\n\nExample:\n >>> from xdoctest.parser import *\n >>> raw_source_lines = ['>>> \"string\"']\n >>> raw_want_lines = ['string']\n >>> self = DoctestParser()\n >>> part, = self._package_chunk(raw_source_lines, raw_want_lines)\n >>> part.source\n '\"string\"'\n >>> part.want\n 'string'", "id": "f2062:c1:m3"} {"signature": "def parse(self, string, info=None):", "body": "if DEBUG > :print('')if sys.version_info.major == : string = utils.ensure_unicode(string)if not isinstance(string, six.string_types):raise TypeError(''.format(string))string = string.expandtabs()min_indent = min_indentation(string)if min_indent > :string = ''.join([l[min_indent:] for l in string.splitlines()])labeled_lines = Nonegrouped_lines = Noneall_parts = Nonetry:labeled_lines = self._label_docsrc_lines(string)grouped_lines = self._group_labeled_lines(labeled_lines)all_parts = list(self._package_groups(grouped_lines))except Exception as orig_ex:if labeled_lines is None:failpoint = ''elif grouped_lines is None:failpoint = ''elif all_parts is None:failpoint = ''if DEBUG:print('')print('')print(''.format(failpoint))import ubelt as ubimport tracebacktb_text = traceback.format_exc()tb_text = ub.highlight_code(tb_text)tb_text = ub.indent(tb_text)print(tb_text)print('')print(string)print('')print(''.format(ub.repr2(info)))print('')print(''.format(orig_ex))print(''.format(ub.repr2(labeled_lines)))print(''.format(ub.repr2(grouped_lines, nl=)))print(''.format(ub.repr2(all_parts)))print('')raise exceptions.DoctestParseError(''.format(failpoint),string=string, info=info, orig_ex=orig_ex)if DEBUG > :print('')return all_parts", "docstring": "Divide the given string into examples and interleaving text.\n\nArgs:\n string (str): string representing the doctest\n info (dict): info about where the string came from in case of an\n error\n\nReturns:\n list : a list of `DoctestPart` objects\n\nCommandLine:\n python -m xdoctest.parser DoctestParser.parse\n\nExample:\n >>> s = 'I am a dummy example with two parts'\n >>> x = 10\n >>> print(s)\n I am a dummy example with two parts\n >>> s = 'My purpose it so demonstrate how wants work here'\n >>> print('The new want applies ONLY to stdout')\n >>> print('given before the last want')\n >>> '''\n this wont hurt the test at all\n even though its multiline '''\n >>> y = 20\n The new want applies ONLY to stdout\n given before the last want\n >>> # Parts from previous examples are executed in the same context\n >>> print(x + y)\n 30\n\n this is simply text, and doesnt apply to the previous doctest the\n directive is still in effect.\n\nExample:\n >>> from xdoctest import parser\n >>> from xdoctest.docstr import docscrape_google\n >>> from xdoctest import core\n >>> self = parser.DoctestParser()\n >>> docstr = self.parse.__doc__\n >>> blocks = docscrape_google.split_google_docblocks(docstr)\n >>> doclineno = self.parse.__func__.__code__.co_firstlineno\n >>> key, (string, offset) = blocks[-2]\n >>> self._label_docsrc_lines(string)\n >>> doctest_parts = self.parse(string)\n >>> # each part with a want-string needs to be broken in two\n >>> assert len(doctest_parts) == 6", "id": "f2062:c1:m1"} {"signature": "def parse_google_returns(docstr, return_annot=None):", "body": "blocks = split_google_docblocks(docstr)for key, block in blocks:lines = block[]if key == '':for retdict in parse_google_retblock(lines, return_annot):yield retdictif key == '':for retdict in parse_google_retblock(lines, return_annot):yield retdict", "docstring": "r\"\"\"\n 
Generates dictionaries of possible return hints based on a google docstring\n\n Args:\n docstr (str): a google-style docstring\n return_annot (str): the return type annotation (if one exists)\n\n Yields:\n Dict[str, str]: dictionaries of return value hints\n\n Example:\n >>> docstr = parse_google_returns.__doc__\n >>> retdict_list = list(parse_google_returns(docstr))\n >>> print([sorted(d.items()) for d in retdict_list])\n [[('desc', 'dictionaries of return value hints'), ('type', 'Dict[str, str]')]]\n\n Example:\n >>> docstr = split_google_docblocks.__doc__\n >>> retdict_list = list(parse_google_returns(docstr))\n >>> print([sorted(d.items())[1] for d in retdict_list])\n [('type', 'List[Tuple]')]", "id": "f2063:m1"} {"signature": "def parse_google_args(docstr):", "body": "blocks = split_google_docblocks(docstr)for key, block in blocks:lines = block[]if key == '':for argdict in parse_google_argblock(lines):yield argdict", "docstring": "r\"\"\"\n Generates dictionaries of argument hints based on a google docstring\n\n Args:\n docstr (str): a google-style docstring\n\n Yields:\n Dict[str, str]: dictionaries of parameter hints\n\n Example:\n >>> docstr = parse_google_args.__doc__\n >>> argdict_list = list(parse_google_args(docstr))\n >>> print([sorted(d.items()) for d in argdict_list])\n [[('desc', 'a google-style docstring'), ('name', 'docstr'), ('type', 'str')]]", "id": "f2063:m0"} {"signature": "@propertydef directives(self):", "body": "if self._directives is None:self._directives = list(directive.Directive.extract(self.source))return self._directives", "docstring": "CommandLine:\n python -m xdoctest.parser DoctestPart.directives\n\nExample:\n >>> self = DoctestPart(['# doctest: +SKIP'], None, 0)\n >>> print(', '.join(list(map(str, self.directives))))\n ", "id": "f2065:c0:m5"} {"signature": "@propertydef node(self):", "body": "return self.modpath + '' + self.callname + '' + str(self.num)", "docstring": "this pytest node", "id": "f2066:c1:m6"} {"signature": "def _color(self, text, color, enabled=None):", "body": "colored = self.config.getvalue('', enabled)if colored:text = utils.color_text(text, color)return text", "docstring": "conditionally color text based on config and flags", "id": "f2066:c1:m24"} {"signature": "def format_src(self, linenos=True, colored=None, want=True,offset_linenos=None, prefix=True):", "body": "formated_parts = list(self.format_parts(linenos=linenos,colored=colored, want=want,offset_linenos=offset_linenos,prefix=prefix))full_source = ''.join(formated_parts)return full_source", "docstring": "Adds prefix and line numbers to a doctest\n\nArgs:\n linenos (bool): if True, adds line numbers to output\n\n colored (bool): if True highlight text with ansi colors. 
Default\n is specified in the config.\n\n want (bool): if True includes \"want\" lines (default False).\n\n offset_linenos (bool): if True offset line numbers to agree with\n their position in the source text file (default False).\n\n prefix (bool): if False, exclude the doctest `>>> ` prefix\n\nExample:\n >>> from xdoctest.core import *\n >>> from xdoctest import core\n >>> testables = parse_doctestables(core.__file__)\n >>> self = next(testables)\n >>> self._parse()\n >>> print(self.format_src())\n >>> print(self.format_src(linenos=False, colored=False))\n >>> assert not self.is_disabled()", "id": "f2066:c1:m10"} {"signature": "def _pkgutil_submodule_names(modpath, with_pkg=False, with_mod=True):", "body": "package_name = modpath_to_modname(modpath)if isfile(modpath):yield package_nameelse:import pkgutilprefix = package_name + ''walker = pkgutil.walk_packages([modpath], prefix=prefix,onerror=lambda x: None) for importer, modname, ispkg in walker:if not ispkg and with_mod:yield modnameelif ispkg and with_pkg:yield modname", "docstring": "Ignore:\n x = sorted(submodule_paths(modname_to_modpath('ubelt')))\n y = sorted(_pkgutil_submodule_names(modname_to_modpath('ubelt')))\n x = [modpath_to_modname(p, hide_init=False, hide_main=False) for p in x]\n print('x = {!r}'.format(x))\n print('y = {!r}'.format(y))\n\nNotes:\n this will take into account pyc files, we choose not to.", "id": "f2068:m0"} {"signature": "def highlight_code(text, lexer_name='', **kwargs):", "body": "lexer_name = {'': '','': '','': '','': '','': '',}.get(lexer_name.replace('', ''), lexer_name)try:import pygmentsimport pygments.lexersimport pygments.formattersimport pygments.formatters.terminalformater = pygments.formatters.terminal.TerminalFormatter(bg='')lexer = pygments.lexers.get_lexer_by_name(lexer_name, ensurenl=False, **kwargs)new_text = pygments.highlight(text, lexer, formater)except ImportError: new_text = textreturn new_text", "docstring": "Highlights a block of text using ansi tags based on language syntax.\n\nArgs:\n text (str): plain text to highlight\n lexer_name (str): name of language\n **kwargs: passed to pygments.lexers.get_lexer_by_name\n\nReturns:\n str: text : highlighted text\n If pygments is not installed, the plain text is returned.\n\nCommandLine:\n python -c \"import pygments.formatters; print(list(pygments.formatters.get_all_formatters()))\"\n\nExample:\n >>> text = 'import xdoctest as xdoc; print(xdoc)'\n >>> new_text = highlight_code(text)\n >>> print(new_text)", "id": "f2070:m4"} {"signature": "def codeblock(block_str):", "body": "return textwrap.dedent(block_str).strip('')", "docstring": "Wraps multiline string blocks and returns unindented code.\nUseful for templated code defined in indented parts of code.\n\nArgs:\n block_str (str): typically in the form of a multiline string\n\nReturns:\n str: the unindented string\n\nExample:\n >>> # Simulate an indented part of code\n >>> if True:\n ... # notice the indentation on this will be normal\n ... codeblock_version = codeblock(\n ... '''\n ... def foo():\n ... return 'bar'\n ... '''\n ... )\n ... # notice the indentation and newlines on this will be odd\n ... normal_version = ('''\n ... def foo():\n ... return 'bar'\n ... 
''')\n >>> assert normal_version != codeblock_version\n >>> print('Without codeblock')\n >>> print(normal_version)\n >>> print('With codeblock')\n >>> print(codeblock_version)", "id": "f2070:m6"} {"signature": "def add_line_numbers(source, start=, n_digits=None):", "body": "was_string = isinstance(source, six.string_types)part_lines = source.splitlines() if was_string else sourceif n_digits is None:endline = start + len(part_lines)n_digits = math.log(max(, endline), )n_digits = int(math.ceil(n_digits))src_fmt = ''part_lines = [src_fmt.format(n_digits=n_digits, count=count, line=line)for count, line in enumerate(part_lines, start=start)]if was_string:return ''.join(part_lines)else:return part_lines", "docstring": "Prefixes code with line numbers\n\nExample:\n >>> print(chr(10).join(add_line_numbers(['a', 'b', 'c'])))\n 1 a\n 2 b\n 3 c\n >>> print(add_line_numbers(chr(10).join(['a', 'b', 'c'])))\n 1 a\n 2 b\n 3 c", "id": "f2070:m5"} {"signature": "def ensuredir(dpath, mode=):", "body": "if isinstance(dpath, (list, tuple)): dpath = join(*dpath)if not exists(dpath):try:os.makedirs(normpath(dpath), mode=mode)except OSError: raisereturn dpath", "docstring": "Ensures that directory will exist. creates new dir with sticky bits by\ndefault\n\nArgs:\n dpath (str): dir to ensure. Can also be a tuple to send to join\n mode (int): octal mode of directory (default 0o1777)\n\nReturns:\n str: path - the ensured directory", "id": "f2074:m0"} {"signature": "def parse_freeform_docstr_examples(docstr, callname=None, modpath=None,lineno=, fpath=None, asone=True):", "body": "def doctest_from_parts(parts, num, curr_offset):nested = [p.orig_linesif p.want is None elsep.orig_lines + p.want.splitlines()for p in parts]docsrc = ''.join(list(it.chain.from_iterable(nested)))docsrc = textwrap.dedent(docsrc)example = doctest_example.DocTest(docsrc, modpath=modpath,callname=callname, num=num,lineno=lineno + curr_offset,fpath=fpath)unoffset = parts[].line_offsetfor p in parts:p.line_offset -= unoffsetexample._parts = partsreturn exampleif DEBUG:print(''.format(callname, modpath))respect_google_headers = Trueif respect_google_headers: special_skip_patterns = ['','','','','','','',]else:special_skip_patterns = [] special_skip_patterns_ = tuple([p.lower() for p in special_skip_patterns])def _start_ignoring(prev):return (special_skip_patterns_ andisinstance(prev, six.string_types) andprev.strip().lower().endswith(special_skip_patterns_))info = dict(callname=callname, modpath=modpath, lineno=lineno, fpath=fpath)all_parts = list(parser.DoctestParser().parse(docstr, info))curr_parts = []curr_offset = num = prev_part = Noneignoring = Falsefor part in all_parts:if isinstance(part, six.string_types):if asone:if not curr_parts:curr_offset += part.count('') + else: if curr_parts:example = doctest_from_parts(curr_parts, num, curr_offset)yield examplecurr_offset += sum(p.n_lines for p in curr_parts)num += curr_parts = []curr_offset += part.count('') + ignoring = Falseelse:if ignoring or _start_ignoring(prev_part):ignoring = Trueif asone:if not curr_parts:curr_offset += part.n_lineselse:curr_offset += part.n_lineselse:curr_parts.append(part)prev_part = partif curr_parts:example = doctest_from_parts(curr_parts, num, curr_offset)yield example", "docstring": "Finds free-form doctests in a docstring. 
This is similar to the original\ndoctests because these tests do not requires a google/numpy style header.\n\nSome care is taken to avoid enabling tests that look like disabled google\ndoctests / scripts.\n\nArgs:\n asone (bool): if False doctests are broken into multiple examples\n based on spacing. (default True)\n\n lineno (int): the line number (starting from 1) of the docstring.\n (i.e. if you were to go to this line number in the source file\n the starting quotes of the docstr would be on this line).\n\nRaises:\n xdoctest.exceptions.DoctestParseError: if an error occurs in parsing\n\nCommandLine:\n python -m xdoctest.core parse_freeform_docstr_examples\n\nExample:\n >>> from xdoctest import core\n >>> from xdoctest import utils\n >>> docstr = utils.codeblock(\n '''\n freeform\n >>> doctest\n >>> hasmultilines\n whoppie\n >>> 'butthis is the same doctest'\n\n >>> secondone\n\n Script:\n >>> 'special case, dont parse me'\n\n DisableDoctest:\n >>> 'special case, dont parse me'\n want\n\n AnythingElse:\n >>> 'general case, parse me'\n want\n ''')\n >>> examples = list(parse_freeform_docstr_examples(docstr, asone=True))\n >>> assert len(examples) == 1\n >>> examples = list(parse_freeform_docstr_examples(docstr, asone=False))\n >>> assert len(examples) == 3", "id": "f2078:m0"} {"signature": "def _workaround_func_lineno(self, node):", "body": "if node.decorator_list:linex = node.lineno - pattern = r'' + node.namewhile not re.match(pattern, self.sourcelines[linex]):linex += lineno = linex + else:lineno = node.linenoreturn lineno", "docstring": "Example:\n >>> source = utils.codeblock(\n '''\n @bar\n @baz\n def foo():\n 'docstr'\n ''')\n >>> self = TopLevelVisitor(source)\n >>> node = self.syntax_tree().body[0]\n >>> self._workaround_func_lineno(node)\n 3", "id": "f2083:c1:m13"} {"signature": "def package_modpaths(pkgpath, with_pkg=False, with_mod=True, followlinks=True,recursive=True, with_libs=False, check=True):", "body": "if isfile(pkgpath):yield pkgpathelse:if with_pkg:root_path = join(pkgpath, '')if not check or exists(root_path):yield root_pathvalid_exts = ['']if with_libs:valid_exts += _platform_pylib_exts()for dpath, dnames, fnames in os.walk(pkgpath, followlinks=followlinks):ispkg = exists(join(dpath, ''))if ispkg or not check:check = True if with_mod:for fname in fnames:if splitext(fname)[] in valid_exts:if fname != '':path = join(dpath, fname)yield pathif with_pkg:for dname in dnames:path = join(dpath, dname, '')if exists(path):yield pathelse:del dnames[:]if not recursive:break", "docstring": "r\"\"\"\n Finds sub-packages and sub-modules belonging to a package.\n\n Args:\n pkgpath (str): path to a module or package\n with_pkg (bool): if True includes package __init__ files (default =\n False)\n with_mod (bool): if True includes module files (default = True)\n exclude (list): ignores any module that matches any of these patterns\n recursive (bool): if False, then only child modules are included\n with_libs (bool): if True then compiled shared libs will be returned as well\n check (bool): if False, then then pkgpath is considered a module even\n if it does not contain an __init__ file.\n\n Yields:\n str: module names belonging to the package\n\n References:\n http://stackoverflow.com/questions/1707709/list-modules-in-py-package\n\n Example:\n >>> from xdoctest.static_analysis import *\n >>> pkgpath = modname_to_modpath('xdoctest')\n >>> paths = list(package_modpaths(pkgpath))\n >>> print('\\n'.join(paths))\n >>> names = list(map(modpath_to_modname, paths))\n >>> assert 
'xdoctest.core' in names\n >>> assert 'xdoctest.__main__' in names\n >>> assert 'xdoctest' not in names\n >>> print('\\n'.join(names))", "id": "f2083:m6"} {"signature": "def _pkgutil_modname_to_modpath(modname): ", "body": "import pkgutilloader = pkgutil.find_loader(modname)if loader is None:raise Exception(''.format(modname))modpath = loader.get_filename().replace('', '')return modpath", "docstring": "faster version of `_syspath_modname_to_modpath` using builtin python\nmechanisms, but unfortunately it doesn't play nice with pytest.\n\nExample:\n >>> # xdoctest: +SKIP\n >>> modname = 'xdoctest.static_analysis'\n >>> _pkgutil_modname_to_modpath(modname)\n ...static_analysis.py\n >>> _pkgutil_modname_to_modpath('_ctypes')\n ..._ctypes...\n\nIgnore:\n >>> _pkgutil_modname_to_modpath('cv2')", "id": "f2083:m10"} {"signature": "def normalize_modpath(modpath, hide_init=True, hide_main=False):", "body": "if six.PY2:if modpath.endswith(''):modpath = modpath[:-]if hide_init:if basename(modpath) == '':modpath = dirname(modpath)hide_main = Trueelse:modpath_with_init = join(modpath, '')if exists(modpath_with_init):modpath = modpath_with_initif hide_main:if basename(modpath) == '':parallel_init = join(dirname(modpath), '')if exists(parallel_init):modpath = dirname(modpath)return modpath", "docstring": "Normalizes __init__ and __main__ paths.\n\nNotes:\n Adds __init__ if reasonable, but only removes __main__ by default\n\nArgs:\n hide_init (bool): if True, always return package modules\n as __init__.py files otherwise always return the dpath.\n hide_init (bool): if True, always strip away main files otherwise\n ignore __main__.py.\n\nCommandLine:\n xdoctest -m xdoctest.static_analysis normalize_modpath\n\nExample:\n >>> import xdoctest.static_analysis as static\n >>> modpath = static.__file__\n >>> assert static.normalize_modpath(modpath) == modpath.replace('.pyc', '.py')\n >>> dpath = dirname(modpath)\n >>> res0 = static.normalize_modpath(dpath, hide_init=0, hide_main=0)\n >>> res1 = static.normalize_modpath(dpath, hide_init=0, hide_main=1)\n >>> res2 = static.normalize_modpath(dpath, hide_init=1, hide_main=0)\n >>> res3 = static.normalize_modpath(dpath, hide_init=1, hide_main=1)\n >>> assert res0.endswith('__init__.py')\n >>> assert res1.endswith('__init__.py')\n >>> assert not res2.endswith('.py')\n >>> assert not res3.endswith('.py')", "id": "f2083:m5"} {"signature": "def modpath_to_modname(modpath, hide_init=True, hide_main=False, check=True,relativeto=None):", "body": "if check and relativeto is None:if not exists(modpath):raise ValueError(''.format(modpath))modpath_ = abspath(expanduser(modpath))modpath_ = normalize_modpath(modpath_, hide_init=hide_init,hide_main=hide_main)if relativeto:dpath = dirname(abspath(expanduser(relativeto)))rel_modpath = relpath(modpath_, dpath)else:dpath, rel_modpath = split_modpath(modpath_, check=check)modname = splitext(rel_modpath)[]if '' in modname:modname, abi_tag = modname.split('')modname = modname.replace('', '')modname = modname.replace('', '')return modname", "docstring": "Determines importable name from file path\n\nConverts the path to a module (__file__) to the importable python name\n(__name__) without importing the module.\n\nThe filename is converted to a module name, and parent directories are\nrecursively included until a directory without an __init__.py file is\nencountered.\n\nArgs:\n modpath (str): module filepath\n hide_init (bool): removes the __init__ suffix (default True)\n hide_main (bool): removes the __main__ suffix (default False)\n check 
(bool): if False, does not raise an error if modpath is a dir\n and does not contain an __init__ file.\n relativeto (str, optional): if specified, all checks are ignored and\n this is considered the path to the root module.\n\nReturns:\n str: modname\n\nRaises:\n ValueError: if check is True and the path does not exist\n\nCommandLine:\n xdoctest -m xdoctest.static_analysis modpath_to_modname\n\nExample:\n >>> from xdoctest import static_analysis\n >>> modpath = static_analysis.__file__.replace('.pyc', '.py')\n >>> modpath = modpath.replace('.pyc', '.py')\n >>> modname = modpath_to_modname(modpath)\n >>> assert modname == 'xdoctest.static_analysis'\n\nExample:\n >>> import xdoctest\n >>> assert modpath_to_modname(xdoctest.__file__.replace('.pyc', '.py')) == 'xdoctest'\n >>> assert modpath_to_modname(dirname(xdoctest.__file__.replace('.pyc', '.py'))) == 'xdoctest'\n\nExample:\n >>> modpath = modname_to_modpath('_ctypes')\n >>> modname = modpath_to_modname(modpath)\n >>> assert modname == '_ctypes'", "id": "f2083:m8"} {"signature": "def parse_description():", "body": "from os.path import dirname, join, existsreadme_fpath = join(dirname(__file__), '')if exists(readme_fpath):with open(readme_fpath, '') as f:text = f.read()return textreturn ''", "docstring": "Parse the description in the README file\n\nCommandLine:\n pandoc --from=markdown --to=rst --output=README.rst README.md\n python -c \"import setup; print(setup.parse_description())\"", "id": "f2084:m2"} {"signature": "def error(msg, exit_code):", "body": "sys.stderr.write(\"\" % msg)sys.stderr.flush()exit(exit_code)", "docstring": "Print `msg` error and exit with status `exit_code`", "id": "f2092:m1"} {"signature": "def load_fixture(filename):", "body": "path = os.path.join(os.path.dirname(__file__), '', filename)with open(path) as fptr:return fptr.read()", "docstring": "Load a fixture.", "id": "f2093:m0"} {"signature": "def update(self):", "body": "cameras = self._api.camera_list()self._cameras_by_id = {v.camera_id: v for i, v in enumerate(cameras)}motion_settings = []for camera_id in self._cameras_by_id.keys():motion_setting = self._api.camera_event_motion_enum(camera_id)motion_settings.append(motion_setting)self._motion_settings_by_id = {v.camera_id: v for i, v in enumerate(motion_settings)}", "docstring": "Update cameras and motion settings with latest from API.", "id": "f2094:c0:m1"} {"signature": "def enable_camera(self, camera_id):", "body": "return self._api.camera_enable(camera_id)", "docstring": "Enable camera(s) - multiple ID or single ex 1 or 1,2,3.", "id": "f2094:c0:m6"} {"signature": "def get_all_cameras(self):", "body": "return self._cameras_by_id.values()", "docstring": "Return a list of cameras.", "id": "f2094:c0:m2"} {"signature": "def get_camera(self, camera_id):", "body": "return self._cameras_by_id[camera_id]", "docstring": "Return camera matching camera_id.", "id": "f2094:c0:m3"} {"signature": "def __init__(self, camera_id, data):", "body": "self._camera_id = camera_idself._source = data['']", "docstring": "Initialize a Surveillance Station motion setting.", "id": "f2096:c2:m0"} {"signature": "@propertydef camera_id(self):", "body": "return self._camera_id", "docstring": "Return id of the camera.", "id": "f2096:c2:m1"} {"signature": "def __init__(self, url, username, password, timeout=, verify_ssl=True):", "body": "self._base_url = url + ''self._username = usernameself._password = passwordself._timeout = timeoutself._verify_ssl = verify_sslself._api_info = Noneself._sid = 
Noneself._initialize_api_info()self._initialize_api_sid()", "docstring": "Initialize a Synology Surveillance API.", "id": "f2096:c0:m0"} {"signature": "def read_config_(self, cfile):", "body": "if not cfile.exists():return {}try:conf_dict = toml.load(str(cfile))except toml.TomlDecodeError:return Noneself.update_(conf_dict)return conf_dict", "docstring": "Read a config file and set config values accordingly.\n\n Returns:\n dict: content of config file.", "id": "f2102:c2:m17"} {"signature": "def options_(self):", "body": "return iter(self)", "docstring": "Iterator over configuration option names.\n\n Yields:\n option names.", "id": "f2102:c1:m9"} {"signature": "def defaults_(self):", "body": "return self.def_.items()", "docstring": "Iterator over option names, and option metadata.\n\n Yields:\n tuples with option names, and :class:`Conf` instances holding\n option metadata.", "id": "f2102:c1:m11"} {"signature": "def create_config_(self, index=, update=False):", "body": "if not self.config_files_[index:]:returnpath = self.config_files_[index]if not path.parent.exists():path.parent.mkdir(parents=True)conf_dict = {}for section in self.sections_():conf_opts = [o for o, m in self[section].defaults_() if m.conf_arg]if not conf_opts:continueconf_dict[section] = {}for opt in conf_opts:conf_dict[section][opt] = (self[section][opt] if update elseself[section].def_[opt].default)with path.open('') as cfile:toml.dump(conf_dict, cfile)", "docstring": "Create config file.\n\n Create config file in :attr:`config_files_[index]`.\n\n Parameters:\n index(int): index of config file.\n update (bool): if set to True and :attr:`config_files_` already\n exists, its content is read and all the options it sets are\n kept in the produced config file.", "id": "f2102:c2:m15"} {"signature": "def reset_(self):", "body": "for sct, opt, meta in self.defaults_():self[sct][opt] = meta.default", "docstring": "Restore default values of all options.", "id": "f2102:c2:m14"} {"signature": "@classmethoddef from_dict_(cls, conf_dict):", "body": "return cls(**{name: Section(**opts)for name, opts in conf_dict.items()})", "docstring": "Use a dictionary to create a :class:`ConfigurationManager`.\n\n Args:\n conf_dict (dict of dict of :class:`ConfOpt`): the first level of\n keys should be the section names. The second level should be\n the option names. 
The values are the options metadata.\n\n Returns:\n :class:`ConfigurationManager`: a configuration manager with the\n requested sections and options.", "id": "f2102:c2:m1"} {"signature": "def _cmd_opts_solver(self, cmd_name):", "body": "sections = self.sections_list(cmd_name)cmd_dict = self._opt_cmds[cmd_name] if cmd_name else self._opt_barefor sct in reversed(sections):for opt, opt_meta in self._conf[sct].def_.items():if not opt_meta.cmd_arg:continueif opt not in cmd_dict:cmd_dict[opt] = sctelse:warnings.warn(''.format(cmd_name, sct, opt, cmd_dict[opt]),error.LoamWarning, stacklevel=)", "docstring": "Scan options related to one command and enrich _opt_cmds.", "id": "f2103:c1:m5"} {"signature": "def _build_parser(self):", "body": "main_parser = argparse.ArgumentParser(description=self.common.help,prefix_chars='')self._add_options_to_parser(self._opt_bare, main_parser)main_parser.set_defaults(**self.common.defaults)if self.bare is not None:main_parser.set_defaults(**self.bare.defaults)subparsers = main_parser.add_subparsers(dest='')for cmd_name, meta in self.subcmds.items():kwargs = {'': '', '': meta.help}dummy_parser = subparsers.add_parser(cmd_name, **kwargs)self._add_options_to_parser(self._opt_cmds[cmd_name], dummy_parser)dummy_parser.set_defaults(**meta.defaults)return main_parser", "docstring": "Build command line argument parser.\n\n Returns:\n :class:`argparse.ArgumentParser`: the command line argument parser.\n You probably won't need to use it directly. To parse command line\n arguments and update the :class:`ConfigurationManager` instance\n accordingly, use the :meth:`parse_args` method.", "id": "f2103:c1:m7"} {"signature": "def _zsh_comp_command(self, zcf, cmd, grouping, add_help=True):", "body": "if add_help:if grouping:print(\"\", end=BLK, file=zcf)print(\"\", end=BLK, file=zcf)print(\"\", end=BLK, file=zcf)no_comp = ('', '')cmd_dict = self._opt_cmds[cmd] if cmd else self._opt_barefor opt, sct in cmd_dict.items():meta = self._conf[sct].def_[opt]if meta.cmd_kwargs.get('') == '':grpfmt, optfmt = \"\", \"\"if meta.comprule is None:meta.comprule = ''else:grpfmt, optfmt = \"\", \"\"if meta.cmd_kwargs.get('') in no_compor meta.cmd_kwargs.get('') == :meta.comprule = Noneif meta.comprule is None:compstr = ''elif meta.comprule == '':optfmt = optfmt.split('')optfmt = optfmt[] + '' + optfmt[]compstr = ''else:optfmt = optfmt.split('')optfmt = optfmt[] + '' + optfmt[]compstr = ''.format(meta.comprule)if grouping:print(grpfmt.format(opt), end=BLK, file=zcf)for name in _names(self._conf[sct], opt):print(optfmt.format(name, meta.help.replace(\"\", \"\"),compstr), end=BLK, file=zcf)", "docstring": "Write zsh _arguments compdef for a given command.\n\n Args:\n zcf (file): zsh compdef file.\n cmd (str): command name, set to None or '' for bare command.\n grouping (bool): group options (zsh>=5.4).\n add_help (bool): add an help option.", "id": "f2103:c1:m9"} {"signature": "def config_conf_section():", "body": "config_dict = OrderedDict((('',ConfOpt(None, True, None, {'': ''},False, '')),('',ConfOpt(None, True, None, {'': ''},False, '')),('',ConfOpt(None, True, None, {'': ''},False, '')),('',ConfOpt(None, True, None, {'': ''},False, '')),('',ConfOpt('', False, None, {}, True, '')),))return config_dict", "docstring": "Define a configuration section handling config file.\n\n Returns:\n dict of ConfOpt: it defines the 'create', 'update', 'edit' and 'editor'\n configuration options.", "id": "f2106:m1"} {"signature": "def set_conf_opt(shortname=None):", "body": "return ConfOpt(None, True, 
shortname,dict(action='', metavar=''),False, '')", "docstring": "Define a Confopt to set a config option.\n\n You can feed the value of this option to :func:`set_conf_str`.\n\n Args:\n shortname (str): shortname for the option if relevant.\n\n Returns:\n :class:`~loam.manager.ConfOpt`: the option definition.", "id": "f2106:m2"} {"signature": "def __init__(self, option):", "body": "self.option = optionsuper().__init__(''.format(option))", "docstring": "Initialization of instances:\n\n Args:\n option (str): invalid subcommand name.\n\n Attributes:\n option (str): invalid subcommand name.", "id": "f2107:c4:m0"} {"signature": "def with_temp_dir(f):", "body": "@wraps(f)def wrapped(*args, **kwargs):with TemporaryDirectory() as temp_dir:return f(*args, temp_dir=temp_dir, **kwargs)return wrapped", "docstring": "A wrapper that creates and deletes a temporary directory.", "id": "f2120:m0"} {"signature": "def apply_options(self, cmd, options=()):", "body": "for option in (self.default_cmd_options + options):cmd = self.apply_option(cmd, option,active=getattr(self, option, False))return cmd", "docstring": "Apply command-line options.", "id": "f2122:c0:m5"} {"signature": "def initialize_options(self):", "body": "self.all = Falseself.coverage = Falsesuper(test, self).initialize_options()", "docstring": "Set the default options.", "id": "f2122:c2:m0"} {"signature": "def initialize_options(self):", "body": "self.branch = ''self.fix = Falsesuper(lint, self).initialize_options()", "docstring": "Set the default options.", "id": "f2122:c1:m0"} {"signature": "def finalize_options(self):", "body": "self.verbose = bool(self.verbose)", "docstring": "Override the distutils abstract method.", "id": "f2122:c0:m2"} {"signature": "def open_file(filename):", "body": "with open(filename) as f:return f.read()", "docstring": "Open and read the file *filename*.", "id": "f2123:m0"} {"signature": "def get_system_config_dirs(app_name, app_author, force_xdg=True):", "body": "if WIN:folder = os.environ.get('')return [os.path.join(folder, app_author, app_name)]if MAC and not force_xdg:return [os.path.join('', app_name)]dirs = os.environ.get('', '')paths = [os.path.expanduser(x) for x in dirs.split(os.pathsep)]return [os.path.join(d, _pathify(app_name)) for d in paths]", "docstring": "r\"\"\"Returns a list of system-wide config folders for the application.\n\n For an example application called ``\"My App\"`` by ``\"Acme\"``,\n something like the following folders could be returned:\n\n macOS (non-XDG):\n ``['/Library/Application Support/My App']``\n Mac OS X (XDG):\n ``['/etc/xdg/my-app']``\n Unix:\n ``['/etc/xdg/my-app']``\n Windows 7:\n ``['C:\\ProgramData\\Acme\\My App']``\n\n :param app_name: the application name. This should be properly capitalized\n and can contain whitespace.\n :param app_author: The app author's name (or company). This should be\n properly capitalized and can contain whitespace.\n :param force_xdg: if this is set to `True`, then on macOS the XDG Base\n Directory Specification will be followed. 
Has no effect\n on non-macOS systems.", "id": "f2124:m1"} {"signature": "def additional_files(self):", "body": "return [os.path.join(f, self.filename) for f in self.additional_dirs]", "docstring": "Get a list of absolute paths to the additional config files.", "id": "f2124:c2:m5"} {"signature": "def get_user_config_dir(app_name, app_author, roaming=True, force_xdg=True):", "body": "if WIN:key = '' if roaming else ''folder = os.path.expanduser(os.environ.get(key, ''))return os.path.join(folder, app_author, app_name)if MAC and not force_xdg:return os.path.join(os.path.expanduser(''), app_name)return os.path.join(os.path.expanduser(os.environ.get('', '')),_pathify(app_name))", "docstring": "Returns the config folder for the application. The default behavior\n is to return whatever is most appropriate for the operating system.\n\n For an example application called ``\"My App\"`` by ``\"Acme\"``,\n something like the following folders could be returned:\n\n macOS (non-XDG):\n ``~/Library/Application Support/My App``\n Mac OS X (XDG):\n ``~/.config/my-app``\n Unix:\n ``~/.config/my-app``\n Windows 7 (roaming):\n ``C:\\\\Users\\\\AppData\\Roaming\\Acme\\My App``\n Windows 7 (not roaming):\n ``C:\\\\Users\\\\AppData\\Local\\Acme\\My App``\n\n :param app_name: the application name. This should be properly capitalized\n and can contain whitespace.\n :param app_author: The app author's name (or company). This should be\n properly capitalized and can contain whitespace.\n :param roaming: controls if the folder should be roaming or not on Windows.\n Has no effect on non-Windows systems.\n :param force_xdg: if this is set to `True`, then on macOS the XDG Base\n Directory Specification will be followed. Has no effect\n on non-macOS systems.", "id": "f2124:m0"} {"signature": "def format_output(self, data, headers, format_name=None,preprocessors=(), column_types=None, **kwargs):", "body": "format_name = format_name or self._format_nameif format_name not in self.supported_formats:raise ValueError(''.format(format_name))(_, _preprocessors, formatter,fkwargs) = self._output_formats[format_name]fkwargs.update(kwargs)if column_types is None:data = list(data)column_types = self._get_column_types(data)for f in unique_items(preprocessors + _preprocessors):data, headers = f(data, headers, column_types=column_types,**fkwargs)return formatter(list(data), headers, column_types=column_types, **fkwargs)", "docstring": "Format the headers and data using a specific formatter.\n\n *format_name* must be a supported formatter (see\n :attr:`supported_formats`).\n\n :param iterable data: An :term:`iterable` (e.g. 
list) of rows.\n :param iterable headers: The column headers.\n :param str format_name: The display format to use (optional, if the\n :class:`TabularOutputFormatter` object has a default format set).\n :param tuple preprocessors: Additional preprocessors to call before\n any formatter preprocessors.\n :param \\*\\*kwargs: Optional arguments for the formatter.\n :return: The formatted data.\n :rtype: str\n :raises ValueError: If the *format_name* is not recognized.", "id": "f2126:c0:m5"} {"signature": "def _get_type(self, value):", "body": "if value is None:return type(None)elif type(value) in int_types:return intelif type(value) in float_types:return floatelif isinstance(value, binary_type):return binary_typeelse:return text_type", "docstring": "Get the data type for *value*.", "id": "f2126:c0:m8"} {"signature": "def style_output(data, headers, style=None,header_token='',odd_row_token='',even_row_token='', **_):", "body": "if style and HAS_PYGMENTS:formatter = Terminal256Formatter(style=style)def style_field(token, field):\"\"\"\"\"\"s = StringIO()formatter.format(((token, field),), s)return s.getvalue()headers = [style_field(header_token, header) for header in headers]data = ([style_field(odd_row_token if i % else even_row_token, f)for f in r] for i, r in enumerate(data, ))return iter(data), headers", "docstring": "Style the *data* and *headers* (e.g. bold, italic, and colors)\n\n .. NOTE::\n This requires the `Pygments `_ library to\n be installed. You can install it with CLI Helpers as an extra::\n $ pip install cli_helpers[styles]\n\n Example usage::\n\n from cli_helpers.tabular_output.preprocessors import style_output\n from pygments.style import Style\n from pygments.token import Token\n\n class YourStyle(Style):\n default_style = \"\"\n styles = {\n Token.Output.Header: 'bold #ansired',\n Token.Output.OddRow: 'bg:#eee #111',\n Token.Output.EvenRow: '#0f0'\n }\n\n headers = ('First Name', 'Last Name')\n data = [['Fred', 'Roberts'], ['George', 'Smith']]\n\n data, headers = style_output(data, headers, style=YourStyle)\n\n :param iterable data: An :term:`iterable` (e.g. list) of rows.\n :param iterable headers: The column headers.\n :param str/pygments.style.Style style: A Pygments style. You can `create\n your own styles `_.\n :param str header_token: The token type to be used for the headers.\n :param str odd_row_token: The token type to be used for odd rows.\n :param str even_row_token: The token type to be used for even rows.\n :return: The styled data and headers.\n :rtype: tuple", "id": "f2131:m7"} {"signature": "def convert_to_string(data, headers, **_):", "body": "return (([utils.to_string(v) for v in row] for row in data),[utils.to_string(h) for h in headers])", "docstring": "Convert all *data* and *headers* to strings.\n\n Binary data that cannot be decoded is converted to a hexadecimal\n representation via :func:`binascii.hexlify`.\n\n :param iterable data: An :term:`iterable` (e.g. list) of rows.\n :param iterable headers: The column headers.\n :return: The processed data and headers.\n :rtype: tuple", "id": "f2131:m1"} {"signature": "def override_missing_value(data, headers, missing_value='', **_):", "body": "return (([missing_value if v is None else v for v in row] for row in data),headers)", "docstring": "Override missing values in the *data* with *missing_value*.\n\n A missing value is any value that is :data:`None`.\n\n :param iterable data: An :term:`iterable` (e.g. 
list) of rows.\n :param iterable headers: The column headers.\n :param missing_value: The default value to use for missing data.\n :return: The processed data and headers.\n :rtype: tuple", "id": "f2131:m2"} {"signature": "def bytes_to_string(data, headers, **_):", "body": "return (([utils.bytes_to_string(v) for v in row] for row in data),[utils.bytes_to_string(h) for h in headers])", "docstring": "Convert all *data* and *headers* bytes to strings.\n\n Binary data that cannot be decoded is converted to a hexadecimal\n representation via :func:`binascii.hexlify`.\n\n :param iterable data: An :term:`iterable` (e.g. list) of rows.\n :param iterable headers: The column headers.\n :return: The processed data and headers.\n :rtype: tuple", "id": "f2131:m4"} {"signature": "def _get_separator(num, sep_title, sep_character, sep_length):", "body": "left_divider_length = right_divider_length = sep_lengthif isinstance(sep_length, tuple):left_divider_length, right_divider_length = sep_lengthleft_divider = sep_character * left_divider_lengthright_divider = sep_character * right_divider_lengthtitle = sep_title.format(n=num + )return \"\".format(left_divider=left_divider, right_divider=right_divider, title=title)", "docstring": "Get a row separator for row *num*.", "id": "f2133:m0"} {"signature": "def adapter(data, headers, **kwargs):", "body": "keys = ('', '', '')return vertical_table(data, headers, **filter_dict_by_key(kwargs, keys))", "docstring": "Wrap vertical table in a function for TabularOutputFormatter.", "id": "f2133:m3"} {"signature": "def intlen(n):", "body": "pos = n.find('')return len(n) if pos < else pos", "docstring": "Find the length of the integer part of a number *n*.", "id": "f2135:m3"} {"signature": "def to_string(value):", "body": "if isinstance(value, binary_type):return bytes_to_string(value)else:return text_type(value)", "docstring": "Convert *value* to a string.", "id": "f2135:m1"} {"signature": "def replace(s, replace):", "body": "for r in replace:s = s.replace(*r)return s", "docstring": "Replace multiple values in a string", "id": "f2135:m7"} {"signature": "def delete_bytes(fobj, size, offset, BUFFER_SIZE=**):", "body": "locked = Falseassert < sizeassert <= offsetfobj.seek(, )filesize = fobj.tell()movesize = filesize - offset - sizeassert <= movesizetry:if movesize > :fobj.flush()try:import mmapfile_map = mmap.mmap(fobj.fileno(), filesize)try:file_map.move(offset, offset + size, movesize)finally:file_map.close()except (ValueError, EnvironmentError, ImportError):locked = lock(fobj)fobj.seek(offset + size)buf = fobj.read(BUFFER_SIZE)while buf:fobj.seek(offset)fobj.write(buf)offset += len(buf)fobj.seek(offset + size)buf = fobj.read(BUFFER_SIZE)fobj.truncate(filesize - size)fobj.flush()finally:if locked:unlock(fobj)", "docstring": "Delete size bytes of empty space starting at offset.\n\n fobj must be an open file object, open rb+ or\n equivalent. 
Mutagen tries to use mmap to resize the file, but\n falls back to a significantly slower method if mmap fails.", "id": "f2171:m4"} {"signature": "def insert_bytes(fobj, size, offset, BUFFER_SIZE=**):", "body": "assert < sizeassert <= offsetlocked = Falsefobj.seek(, )filesize = fobj.tell()movesize = filesize - offsetfobj.write(b'' * size)fobj.flush()try:try:import mmapfile_map = mmap.mmap(fobj.fileno(), filesize + size)try:file_map.move(offset + size, offset, movesize)finally:file_map.close()except (ValueError, EnvironmentError, ImportError):locked = lock(fobj)fobj.truncate(filesize)fobj.seek(, )padsize = sizewhile padsize:addsize = min(BUFFER_SIZE, padsize)fobj.write(b\"\" * addsize)padsize -= addsizefobj.seek(filesize, )while movesize:thismove = min(BUFFER_SIZE, movesize)fobj.seek(-thismove, )nextpos = fobj.tell()data = fobj.read(thismove)fobj.seek(-thismove + size, )fobj.write(data)fobj.seek(nextpos)movesize -= thismovefobj.flush()finally:if locked:unlock(fobj)", "docstring": "Insert size bytes of empty space starting at offset.\n\n fobj must be an open file object, open rb+ or\n equivalent. Mutagen tries to use mmap to resize the file, but\n falls back to a significantly slower method if mmap fails.", "id": "f2171:m3"} {"signature": "def delete(filename):", "body": "OggSpeex(filename).delete()", "docstring": "Remove tags from a file.", "id": "f2172:m0"} {"signature": "def as_dict(self):", "body": "d = {}for key, value in self._internal:d.setdefault(key, []).append(value)return d", "docstring": "Return a copy of the comment data in a real dict.", "id": "f2173:c4:m8"} {"signature": "def __getitem__(self, key):", "body": "values = [value for (k, value) in self._internal if k == key]if not values:raise KeyError(key)else:return values", "docstring": "A list of values for the key.\n\n This is a copy, so comment['title'].append('a title') will not\n work.", "id": "f2173:c4:m3"} {"signature": "@classmethoddef RegisterTXXXKey(cls, key, desc):", "body": "frameid = \"\" + descdef getter(id3, key):return list(id3[frameid])def setter(id3, key, value):try:frame = id3[frameid]except KeyError:enc = try:for v in value:v.encode('')except UnicodeError:enc = id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc))else:frame.text = valuedef deleter(id3, key):del(id3[frameid])cls.RegisterKey(key, getter, setter, deleter)", "docstring": "Register a user-defined text frame key.\n\n Some ID3 tags are stored in TXXX frames, which allow a\n freeform 'description' which acts as a subkey,\n e.g. 
TXXX:BARCODE.::\n\n EasyID3.RegisterTXXXKey('barcode', 'BARCODE').", "id": "f2175:c1:m2"} {"signature": "@propertydef FrameID(self):", "body": "return type(self).__name__", "docstring": "ID3v2 three or four character frame ID", "id": "f2176:c0:m3"} {"signature": "def _get_v23_frame(self, **kwargs):", "body": "new_kwargs = {}for checker in self._framespec:name = checker.namevalue = getattr(self, name)new_kwargs[name] = checker._validate23(self, value, **kwargs)return type(self)(**new_kwargs)", "docstring": "Returns a frame copy which is suitable for writing into a v2.3 tag.\n\n kwargs get passed to the specs.", "id": "f2176:c0:m1"} {"signature": "def clear_pictures(self):", "body": "blocks = [b for b in self.metadata_blocks if b.code != Picture.code]self.metadata_blocks = blocks", "docstring": "Delete all pictures from the file.", "id": "f2177:c14:m7"} {"signature": "def load(self, filename):", "body": "self.metadata_blocks = []self.tags = Noneself.cuesheet = Noneself.seektable = Noneself.filename = filenamefileobj = StrictFileObject(open(filename, \"\"))try:self.__check_header(fileobj)while self.__read_metadata_block(fileobj):passfinally:fileobj.close()try:self.metadata_blocks[].lengthexcept (AttributeError, IndexError):raise FLACNoHeaderError(\"\")", "docstring": "Load file information from a filename.", "id": "f2177:c14:m4"} {"signature": "def add_tags(self):", "body": "if self.tags is None:self.tags = VCFLACDict()self.metadata_blocks.append(self.tags)else:raise FLACVorbisError(\"\")", "docstring": "Add a Vorbis comment block to the file.", "id": "f2177:c14:m2"} {"signature": "def __iter__(self):", "body": "return iter(text_type(self).split(u\"\"))", "docstring": "Iterate over the strings of the value (not the characters)", "id": "f2180:c8:m0"} {"signature": "def delete(filename):", "body": "try:APEv2(filename).delete()except APENoHeaderError:pass", "docstring": "Remove tags from a file.", "id": "f2180:m1"} {"signature": "def APEValue(value, kind):", "body": "if kind in (TEXT, EXTERNAL):if not isinstance(value, text_type):if PY3:raise TypeError(\"\")else:value = value.encode(\"\")if kind == TEXT:return APETextValue(value, kind)elif kind == BINARY:return APEBinaryValue(value, kind)elif kind == EXTERNAL:return APEExtValue(value, kind)else:raise ValueError(\"\")", "docstring": "APEv2 tag value factory.\n\n Use this if you need to specify the value's type manually. 
Binary\n and text data are automatically detected by APEv2.__setitem__.", "id": "f2180:m2"} {"signature": "def save(self, filename=None):", "body": "raise NotImplementedError", "docstring": "Save changes to a file.", "id": "f2183:c0:m2"} {"signature": "def pprint(self):", "body": "stream = \"\" % (self.info.pprint(), self.mime[])try:tags = self.tags.pprint()except AttributeError:return streamelse:return stream + ((tags and \"\" + tags) or \"\")", "docstring": "Print stream information and comment key=value pairs.", "id": "f2183:c1:m9"} {"signature": "def update_to_v24(self):", "body": "self.__update_common()if self.__unknown_version == self._V23:converted = []for frame in self.unknown_frames:try:name, size, flags = unpack('', frame[:])frame = BinaryFrame.fromData(self, flags, frame[:])except (struct.error, error):continuename = name.decode('')converted.append(self.__save_frame(frame, name=name))self.unknown_frames[:] = convertedself.__unknown_version = self._V24try:date = text_type(self.get(\"\", \"\"))if date.strip(u\"\"):self.pop(\"\")dat = text_type(self.get(\"\", \"\"))if dat.strip(\"\"):self.pop(\"\")date = \"\" % (date, dat[:], dat[:])time = text_type(self.get(\"\", \"\"))if time.strip(\"\"):self.pop(\"\")date += \"\" % (time[:], time[:])if \"\" not in self:self.add(TDRC(encoding=, text=date))except UnicodeDecodeError:passif \"\" in self:f = self.pop(\"\")if \"\" not in self:try:self.add(TDOR(encoding=, text=str(f)))except UnicodeDecodeError:passif \"\" in self:f = self.pop(\"\")if \"\" not in self:self.add(TIPL(encoding=f.encoding, people=f.people))for key in [\"\", \"\", \"\", \"\", \"\", \"\", \"\"]:if key in self:del(self[key])", "docstring": "Convert older tags into an ID3v2.4 tag.\n\n This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to\n TDRC). If you intend to save tags, you must call this function\n at some point; it is called by default when loading the tag.", "id": "f2184:c0:m19"} {"signature": "def load(self, filename, ID3=None, **kwargs):", "body": "if ID3 is None:ID3 = self.ID3else:self.ID3 = ID3self.filename = filenametry:self.tags = ID3(filename, **kwargs)except error:self.tags = Noneif self.tags is not None:try:offset = self.tags.sizeexcept AttributeError:offset = Noneelse:offset = Nonetry:fileobj = open(filename, \"\")self.info = self._Info(fileobj, offset)finally:fileobj.close()", "docstring": "Load stream and tag information from a file.\n\n A custom tag reader may be used in instead of the default\n mutagen.id3.ID3 object, e.g. 
an EasyID3 reader.", "id": "f2184:c1:m2"} {"signature": "def add(self, frame):", "body": "return self.loaded_frame(frame)", "docstring": "Add a frame to the tag.", "id": "f2184:c0:m8"} {"signature": "def __update_common(self):", "body": "if \"\" in self:self[\"\"].genres = self[\"\"].genresif self.version < self._V23:pics = self.getall(\"\")mimes = {\"\": \"\", \"\": \"\"}self.delall(\"\")for pic in pics:newpic = APIC(encoding=pic.encoding, mime=mimes.get(pic.mime, pic.mime),type=pic.type, desc=pic.desc, data=pic.data)self.add(newpic)self.delall(\"\")", "docstring": "Updates done by both v23 and v24 update", "id": "f2184:c0:m18"} {"signature": "def setall(self, key, values):", "body": "self.delall(key)for tag in values:self[tag.HashKey] = tag", "docstring": "Delete frames of the given type and add frames in 'values'.", "id": "f2184:c0:m5"} {"signature": "def pprint(self):", "body": "frames = sorted(Frame.pprint(s) for s in self.values())return ''.join(frames)", "docstring": "Return tags in a human-readable format.\n\n \"Human-readable\" is used loosely here. The format is intended\n to mirror that used for Vorbis or APEv2 output, e.g.\n\n ``TIT2=My Title``\n\n However, ID3 frames can have multiple keys:\n\n ``POPM=user@example.org=3 128/255``", "id": "f2184:c0:m6"} {"signature": "def delall(self, key):", "body": "if key in self:del(self[key])else:key = key + \"\"for k in self.keys():if k.startswith(key):del(self[k])", "docstring": "Delete all tags of a given kind; see getall.", "id": "f2184:c0:m4"} {"signature": "def load(self, filename, known_frames=None, translate=True, v2_version=):", "body": "if not v2_version in (, ):raise ValueError(\"\")from os.path import getsizeself.filename = filenameself.__known_frames = known_framesself._fileobj = open(filename, '')self.__filesize = getsize(filename)try:try:self._load_header()except EOFError:self.size = raise ID3NoHeaderError(\"\" % (filename, self.__filesize))except (ID3NoHeaderError, ID3UnsupportedVersionError) as err:self.size = import sysstack = sys.exc_info()[]try:self._fileobj.seek(-, )except EnvironmentError:reraise(err, None, stack)else:frames = ParseID3v1(self._fileobj.read())if frames is not None:self.version = self._V11for v in frames.values():self.add(v)else:reraise(err, None, stack)else:frames = self.__known_framesif frames is None:if self._V23 <= self.version:frames = Frameselif self._V22 <= self.version:frames = Frames_2_2data = self.__fullread(self.size - )for frame in self.__read_frames(data, frames=frames):if isinstance(frame, Frame):self.add(frame)else:self.unknown_frames.append(frame)self.__unknown_version = self.versionfinally:self._fileobj.close()del self._fileobjdel self.__filesizeif translate:if v2_version == :self.update_to_v23()else:self.update_to_v24()", "docstring": "Load tags from a filename.\n\n Keyword arguments:\n\n * filename -- filename to load tag data from\n * known_frames -- dict mapping frame IDs to Frame objects\n * translate -- Update all tags to ID3v2.3/4 internally. 
If you\n intend to save, this must be true or you have to\n call update_to_v23() / update_to_v24() manually.\n * v2_version -- if update_to_v23 or update_to_v24 get called (3 or 4)\n\n Example of loading a custom frame::\n\n my_frames = dict(mutagen.id3.Frames)\n class XMYF(Frame): ...\n my_frames[\"XMYF\"] = XMYF\n mutagen.id3.ID3(filename, known_frames=my_frames)", "id": "f2184:c0:m2"} {"signature": "def delete(self, filename=None):", "body": "if filename is None:filename = self.filenameself.tags.clear()fileobj = open(filename, \"\")try:try:self.tags._inject(fileobj)except error as e:reraise(self._Error, e, sys.exc_info()[])except EOFError:raise self._Error(\"\")finally:fileobj.close()", "docstring": "Remove tags from a file.\n\n If no filename is given, the one most recently loaded is used.", "id": "f2186:c2:m1"} {"signature": "def load(self, filename):", "body": "self.filename = filenamefileobj = open(filename, \"\")try:try:self.info = self._Info(fileobj)self.tags = self._Tags(fileobj, self.info)self.info._post_tags(fileobj)except error as e:reraise(self._Error, e, sys.exc_info()[])except EOFError:raise self._Error(\"\")finally:fileobj.close()", "docstring": "Load file information from a filename.", "id": "f2186:c2:m0"} {"signature": "def __eq__(self, other):", "body": "try:return (self.write() == other.write())except AttributeError:return False", "docstring": "Two Ogg pages are the same if they write the same data.", "id": "f2186:c1:m1"} {"signature": "@staticmethoddef to_packets(pages, strict=False):", "body": "serial = pages[].serialsequence = pages[].sequencepackets = []if strict:if pages[].continued:raise ValueError(\"\")if not pages[-].complete:raise ValueError(\"\")elif pages and pages[].continued:packets.append([b\"\"])for page in pages:if serial != page.serial:raise ValueError(\"\" % page)elif sequence != page.sequence:raise ValueError(\"\" % page)else:sequence += if page.continued:packets[-].append(page.packets[])else:packets.append([page.packets[]])packets.extend([p] for p in page.packets[:])return [b\"\".join(p) for p in packets]", "docstring": "Construct a list of packet data from a list of Ogg pages.\n\n If strict is true, the first page must start a new packet,\n and the last page must end the last packet.", "id": "f2186:c1:m7"} {"signature": "def save(self, filename=None):", "body": "if filename is None:filename = self.filenamefileobj = open(filename, \"\")try:try:self.tags._inject(fileobj)except error as e:reraise(self._Error, e, sys.exc_info()[])except EOFError:raise self._Error(\"\")finally:fileobj.close()", "docstring": "Save a tag to a file.\n\n If no filename is given, the one most recently loaded is used.", "id": "f2186:c2:m2"} {"signature": "def delete(filename):", "body": "OggOpus(filename).delete()", "docstring": "Remove tags from a file.", "id": "f2191:m0"} {"signature": "def as_dict(self):", "body": "d = {}for key, value in self._internal:d.setdefault(key.lower(), []).append(value)return d", "docstring": "Return a copy of the comment data in a real dict.", "id": "f2192:c4:m9"} {"signature": "def __update_parents(self, fileobj, path, delta):", "body": "for atom in path:fileobj.seek(atom.offset)size = cdata.uint_be(fileobj.read())if size == : size = cdata.ulonglong_be(fileobj.read()[:])fileobj.seek(atom.offset + )fileobj.write(cdata.to_ulonglong_be(size + delta))else: fileobj.seek(atom.offset)fileobj.write(cdata.to_uint_be(size + delta))", "docstring": "Update all parent atoms with the new size.", "id": "f2193:c8:m7"} {"signature": "def path(self, *names):", 
"body": "path = [self]for name in names:path.append(path[-][name, ])return path[:]", "docstring": "Look up and return the complete path of an atom.\n\n For example, atoms.path('moov', 'udta', 'meta') will return a\n list of three atoms, corresponding to the moov, udta, and meta\n atoms.", "id": "f2193:c7:m1"} {"signature": "def findall(self, name, recursive=False):", "body": "if self.children is not None:for child in self.children:if child.name == name:yield childif recursive:for atom in child.findall(name, True):yield atom", "docstring": "Recursively find all child atoms by specified name.", "id": "f2193:c6:m2"} {"signature": "@lru_cache(maxsize=)def selectPolicy(self, origin, request_method=None):", "body": "ret_origin = Nonepolicyname = Noneif self.matchstrategy in (\"\", \"\"):for pol in self.activepolicies:policy=self.policies[pol]ret_origin = Nonepolicyname = policy.nameif policyname == \"\":breakif self.matchstrategy == \"\":if policy.methods != \"\" and not CORS.matchlist(request_method, policy.methods, case_sensitive=True):continueif origin and policy.match:if CORS.matchlist(origin, policy.match):ret_origin = originelif policy.origin == \"\":ret_origin = originelif policy.origin:ret_origin = policy.originif ret_origin:breakreturn policyname, ret_origin", "docstring": "Based on the matching strategy and the origin and optionally the requested method a tuple of policyname and origin to pass back is returned.", "id": "f2201:c0:m3"} {"signature": "def non_preflight_are_not_answered(corsed, hdr):", "body": "req = prepRequest(hdr)res = req.get_response(corsed)assert res.body.decode(\"\") == \"\", \"\" % res.body", "docstring": "requests that don't match preflight criteria are ignored", "id": "f2203:m4"} {"signature": "def post_process(func):", "body": "@wraps(func)def wrapper(*args, **kwargs):res = func(*args, **kwargs)project_command = args[]project_handler = project_command.vcp.project_handlerif not project_handler:return reskwargs[''] = resgetattr(project_handler, func.__name__)(**kwargs)return resreturn wrapper", "docstring": "Calls the project handler same named function\n\n Note: the project handler may add some extra arguments to the command,\n so when use this decorator, add **kwargs to the end of the arguments", "id": "f2213:m0"} {"signature": "@abstractmethoddef install_to(self, project, env):", "body": "pass", "docstring": "Install current (self.project) project to the argument one.\n\n Args:\n project (Project): the target project\n env (EnvironmentBase): the target environment\n\n Returns:\n True if the install is succeeded, else False", "id": "f2217:c1:m2"} {"signature": "def publish(self):", "body": "with open( self.file_name, '') as fh:for k,v in self.args.iteritems():buf = StringIO.StringIO()buf.write(k)self._append_vals(buf,v)fh.write(buf.getvalue() + '')buf.close()", "docstring": "Write output file.", "id": "f2233:c0:m3"} {"signature": "def _post_unpack(self,items):", "body": "return items", "docstring": "perform data modification of any values, after unpacking from a buffer.", "id": "f2234:c0:m3"} {"signature": "@staticmethoddef verify(data):", "body": "if len(data) == :return Falsecrc = VProCRC.get(data)if crc:log.info(\"\")else:log.debug(\"\")return not crc", "docstring": "perform CRC check on raw serial data, return true if valid.\na valid CRC == 0.", "id": "f2236:c1:m1"} {"signature": "@staticmethoddef _unpack_storm_date(date):", "body": "year = (date & ) + day = (date >> ) & month = (date >> ) & return \"\" % (year, month, day)", "docstring": "given a packed 
storm date field, unpack and return 'YYYY-MM-DD' string.", "id": "f2236:c2:m3"} {"signature": "@staticmethoddef _unpack_time(val):", "body": "return \"\" % divmod(val, )", "docstring": "given a packed time field, unpack and return \"HH:MM\" string.", "id": "f2236:c2:m2"} {"signature": "def __del__(self):", "body": "self.port.close()", "docstring": "close serial port when object is deleted.", "id": "f2236:c6:m3"} {"signature": "def _calc_derived_fields(self, fields):", "body": "temp = fields['']hum = fields['']wind = fields['']wind10min = fields['']fields[''] = calc_heat_index(temp, hum)fields[''] = calc_wind_chill(temp, wind, wind10min)fields[''] = calc_dewpoint(temp, hum)now = time.localtime()fields[''] = time.strftime(\"\", now)fields[''] = now[]fields[''] = str(now[]).zfill()now = time.gmtime()fields[''] = time.strftime(\"\", now)fields[''] = now[]fields[''] = str(now[]).zfill()", "docstring": "calculates the derived fields (those fields that are calculated)", "id": "f2236:c6:m11"} {"signature": "def m_sec_to_mph(m):", "body": "return m * ", "docstring": "Meters/second (m/s) to miles/hour (mph)", "id": "f2242:m20"} {"signature": "def knots_to_mph(kts):", "body": "return kts * ", "docstring": "Knots (kt) to miles/hour (mph)", "id": "f2242:m3"} {"signature": "def km_hr_to_knots(km):", "body": "return km * ", "docstring": "Kilometers/hour (kph) to knots (kt)", "id": "f2242:m6"} {"signature": "def nmph_to_knots(mph):", "body": "return mph", "docstring": "Nautical miles/hour (nmph) to knots (kt)", "id": "f2242:m9"} {"signature": "def knots_to_nmph(kts):", "body": "return kts", "docstring": "Knots (kt) to nautical miles/hour (nmph)", "id": "f2242:m4"} {"signature": "def mph_to_knots(mph):", "body": "return mph * ", "docstring": "Miles/hour (mph) to knots (kt)", "id": "f2242:m8"} {"signature": "def mb_to_atm(mb):", "body": "return mb * ", "docstring": "Millibars (mb) to atmospheres (atm)", "id": "f2246:m11"} {"signature": "def in60_to_lbs(inches):", "body": "return inches * ", "docstring": "Inches of mercury @60F (inHg60) to pounds/square inch (lb/in**2)", "id": "f2246:m10"} {"signature": "def lb_sqin_to_mb(lbs):", "body": "return lbs * ", "docstring": "Pounds/square inch (lb/in**2) to millibars (mb)", "id": "f2246:m33"} {"signature": "def mb_to_n_sqm(mb):", "body": "return mb * ", "docstring": "Millibars (mb) to newtons/square meter (N/m**2)", "id": "f2246:m18"} {"signature": "def mb_to_hpa(mb):", "body": "return mb", "docstring": "Millibars (mb) to hectopascals (hPa)", "id": "f2246:m12"} {"signature": "def atm_to_mb(atm):", "body": "return atm * ", "docstring": "Atmospheres (atm) to millibars (mb)", "id": "f2246:m2"} {"signature": "def lb_sqft_to_mb(lbs):", "body": "return lbs * ", "docstring": "Pounds/square foot (lb/ft**2) to millibars (mb)", "id": "f2246:m29"} {"signature": "def kelvin_to_rankine(k):", "body": "return k * ", "docstring": "Degrees Kelvin (K) to degrees Rankine (R)", "id": "f2247:m8"} {"signature": "def fahrenheit_to_rankine(f):", "body": "return f + ", "docstring": "Degrees Fahrenheit (F) to degrees Rankine (R)", "id": "f2247:m5"} {"signature": "def kelvin_to_celsius(k):", "body": "return k - ", "docstring": "Degrees Kelvin (K) to degrees Celsius (C)", "id": "f2247:m6"} {"signature": "def get_pub_services(opts):", "body": "sites = []for p_key in vars(opts).keys():args = getattr(opts,p_key)if p_key in PUB_SERVICES and args:if isinstance(args,tuple):ps = PUB_SERVICES[p_key](*args)else:ps = PUB_SERVICES[p_key](args)sites.append( ps )return sites", "docstring": "use values in 
opts data to generate instances of publication services.", "id": "f2249:m2"} {"signature": "def get( self, station, interval ):", "body": "rec = station.fields['']if rec:threshold = station.fields[''] + GUST_MPH_MINif rec[''] >= threshold:self.value = (rec[''],rec[''])self.count = GUST_TTL * / intervalelse:self.value = self.NO_VALUEif self.count:self.count -= else:self.value = self.NO_VALUElog.debug(''.format(*self.value))return self.value", "docstring": "return gust data, if above threshold value and current time is inside\nreporting window period", "id": "f2249:c1:m1"} {"signature": "def compress(string, mode=MODE_GENERIC, quality=, lgwin=, lgblock=):", "body": "compressor = Compressor(mode=mode, quality=quality, lgwin=lgwin,lgblock=lgblock)return compressor.process(string) + compressor.finish()", "docstring": "Compress a byte string.\n\n Args:\n string (bytes): The input data.\n mode (int, optional): The compression mode can be MODE_GENERIC (default),\n MODE_TEXT (for UTF-8 format text input) or MODE_FONT (for WOFF 2.0).\n quality (int, optional): Controls the compression-speed vs compression-\n density tradeoff. The higher the quality, the slower the compression.\n Range is 0 to 11. Defaults to 11.\n lgwin (int, optional): Base 2 logarithm of the sliding window size. Range\n is 10 to 24. Defaults to 22.\n lgblock (int, optional): Base 2 logarithm of the maximum input block size.\n Range is 16 to 24. If set to 0, the value will be set based on the\n quality. Defaults to 0.\n\n Returns:\n The compressed byte string.\n\n Raises:\n brotli.error: If arguments are invalid, or compressor fails.", "id": "f2256:m0"} {"signature": "def __repr__(self):", "body": "return \"\".format(self.pos>>, self.pos&)", "docstring": "Representation\n >>> olleke\n BitStream(pos=0:0)", "id": "f2258:c1:m1"} {"signature": "def span(self, index):", "body": "lower = self.value0+sum(<upper = lower+(<return lower, upper-", "docstring": "Give the range of possible values in a tuple\n Useful for mnemonic and explanation", "id": "f2258:c8:m4"} {"signature": "def value(self, dcode, dextra):", "body": "if dcode<:return [(,),(,),(,),(,),(,-),(,+),(,-),(,+),(,-),(,+),(,-),(,+),(,-),(,+),(,-),(,+)][dcode]if dcode<+self.NDIRECT:return (,dcode-)POSTFIX_MASK = ( << self.NPOSTFIX) - ndistbits = + ((dcode - self.NDIRECT - ) >> (self.NPOSTFIX + ))hcode = (dcode - self.NDIRECT - ) >> self.NPOSTFIXlcode = (dcode - self.NDIRECT - ) & POSTFIX_MASKoffset = (( + (hcode & )) << ndistbits) - distance = ((offset + dextra) << self.NPOSTFIX) + lcode + self.NDIRECT + return (,distance)", "docstring": "Decode value of symbol together with the extra bits.\n >>> d = DistanceAlphabet('D', NPOSTFIX=2, NDIRECT=10)\n >>> d[34].value(2)\n (0, 35)", "id": "f2258:c29:m2"} {"signature": "def __getitem__(self, index):", "body": "if index>=len(self.extraTable):raise ValueError(\"\".format(self.__class__.__name__, index))return Symbol(self, index)", "docstring": "Faster than PrefixDecoder", "id": "f2258:c8:m2"} {"signature": "def makeHexData(self, pos):", "body": "firstAddress = pos+>>lastAddress = self.stream.pos+>>return ''.join(map(''.format,self.stream.data[firstAddress:lastAddress]))", "docstring": "Produce hex dump of all data containing the bits\n from pos to stream.pos", "id": "f2258:c32:m1"} {"signature": "def explanation(self, index, extra=None):", "body": "extraBits = if extra is None else self.extraBits(index)if not hasattr(self, ''):formatString = ''lo = hi = value = self.value(index, extra)elif extraBits==:formatString = ''lo, hi = 
self.span(index)value = loelse:formatString = ''lo, hi = self.span(index)value = lo+extrareturn formatString.format(self.description and self.description+'',''*extraBits,self.bitPattern(index),lo, hi,extra,value,)", "docstring": "Expanded version of Code.explanation supporting extra bits.\n If you don't supply extra, it is not mentioned.", "id": "f2258:c6:m3"} {"signature": "def setDecode(self, decodeTable):", "body": "self.decodeTable = decodeTabletodo = set(decodeTable)maskLength = lengthTable = {}while todo:mask = (<splitSymbols = defaultdict(list)for s in todo: splitSymbols[s&mask].append(s)for s,subset in splitSymbols.items():if len(subset)==:lengthTable[self.decodeTable[s]] = maskLengthtodo.remove(s)maskLength +=self.lengthTable = lengthTableself.minLength = min(lengthTable.values())self.maxLength = max(lengthTable.values())self.switchToPrefix()", "docstring": "Store decodeTable,\n and compute lengthTable, minLength, maxLength from encodings.", "id": "f2258:c4:m7"} {"signature": "def peek(self, n):", "body": "return int.from_bytes(self.data[self.pos>>:self.pos+n+>>],'')>>(self.pos&) & (<", "docstring": "Peek an n bit integer from the stream without updating the pointer.\n It is not an error to read beyond the end of the stream.\n >>> olleke.data[:2]==b'\\x1b\\x2e' and 0x2e1b==11803\n True\n >>> olleke.peek(15)\n 11803\n >>> hex(olleke.peek(32))\n '0x2e1b'", "id": "f2258:c1:m3"} {"signature": "def doAction(self, w, action):", "body": "U = self.upperCase1return eval(self.actionList[action], locals())", "docstring": "Perform the proper action", "id": "f2258:c31:m4"} {"signature": "def outputFormatter(s):", "body": "result = ''def formatSubString(s):for c in s:if c==: yield ''else: yield outputCharFormatter(c)if len(result)<: return ''.join(formatSubString(s))else:return ''.join(formatSubString(s[:]))+''+''.join(formatSubString(s[-:]))", "docstring": "Show string or char.", "id": "f2258:m1"} {"signature": "def readLiteralContextModes(self):", "body": "print(''.center(, ''))self.literalContextModes = []for i in range(self.numberOfBlockTypes[L]):self.literalContextModes.append(self.verboseRead(LiteralContextMode(number=i)))", "docstring": "Read literal context modes.\n LSB6: lower 6 bits of last char\n MSB6: upper 6 bits of last char\n UTF8: rougly dependent on categories:\n upper 4 bits depend on category of last char:\n control/whitespace/space/ punctuation/quote/%/open/close/\n comma/period/=/digits/ VOWEL/CONSONANT/vowel/consonant\n lower 2 bits depend on category of 2nd last char:\n space/punctuation/digit or upper/lowercase\n signed: hamming weight of last 2 chars", "id": "f2258:c32:m10"} {"signature": "def mnemonic(self, index):", "body": "return str(self.value(index))", "docstring": "Give mnemonic of symbol.\n Override where needed.", "id": "f2258:c5:m9"} {"signature": "def switchToPrefix(self):", "body": "self.mode = PrefixDecoder", "docstring": "This routine makes sure the prefix decoder is activated.", "id": "f2258:c4:m9"} {"signature": "def __iter__(self):", "body": "return map(partial(Symbol, self), range(len(self)))", "docstring": "Produce all symbols.", "id": "f2258:c3:m2"} {"signature": "def readTupleAndExtra(self, stream):", "body": "length, symbol = self.decodePeek(stream.peek(self.maxLength))stream.pos += lengthextraBits = self.extraBits(symbol.index)return length, symbol, extraBits, stream.read(extraBits)", "docstring": "Read symbol and extrabits from stream.\n Returns symbol length, symbol, extraBits, extra\n >>> olleke.pos = 6\n >>> 
MetablockLengthAlphabet().readTupleAndExtra(olleke)\n (2, Symbol(MLEN, 4), 16, 46)", "id": "f2258:c6:m2"} {"signature": "def splitSymbol(self, index):", "body": "row = [,,,,,,,,,,][index>>]col = [,,,,,,,,,,][index>>]insertLengthCode = row<< | index>>&if row: insertLengthCode -= copyLengthCode = col<< | index&return (Symbol(self.insertLengthAlphabet, insertLengthCode),Symbol(self.copyLengthAlphabet, copyLengthCode),row==)", "docstring": "Give relevant values for computations:\n (insertSymbol, copySymbol, dist0flag)", "id": "f2258:c28:m3"} {"signature": "def value(self, index, extra):", "body": "if extra>:raise ValueError('')return index, extra<", "docstring": "Returns NPOSTFIX and NDIRECT<for i in range(numberOfTrees):if kind==L: alphabet = LiteralAlphabet(i)elif kind==I: alphabet = InsertAndCopyAlphabet(i)elif kind==D: alphabet = DistanceAlphabet(i, NPOSTFIX=self.NPOSTFIX, NDIRECT=self.NDIRECT)self.readPrefixCode(alphabet)prefixes.append(alphabet)self.prefixCodes[kind] = prefixes", "docstring": "Read prefix code array", "id": "f2258:c32:m13"} {"signature": "def value(self, index, extra=None):", "body": "if extra is not None:raise ValueError('')return index", "docstring": "Get value of symbol for computations.\n Override where needed.", "id": "f2258:c5:m8"} {"signature": "def value(self, index, extra):", "body": "lower, upper = self.span(index)value = lower+(extra or )if value>upper:raise ValueError('')return value", "docstring": "Override if you don't define value0 and extraTable", "id": "f2258:c8:m3"} {"signature": "def value(self, index, extra):", "body": "index = indexif index==: return , if index<=self.RLEMAX: return (<return , index-self.RLEMAX", "docstring": "Give count and value.", "id": "f2258:c24:m3"} {"signature": "def get_argument_key(kwargs, argument_key, index):", "body": "if isinstance(argument_key, (list, tuple)):return_string = ''for element in argument_key:return_val = kwargs.get(element)[index]if return_val is None:return_val = ''return_string += str(return_val)if len(argument_key) > :return_string += ''return return_stringreturn str(kwargs.get(argument_key)[index] or '')", "docstring": ":param kwargs: keyword arguments used to kickoff the multiget\n:param argument_key: Name or set of names of keyword arguments to read from\n:param index: which set of arguments are considered\n:return:", "id": "f2267:m4"} {"signature": "def map_arguments_to_objects(kwargs, objects, object_key, object_tuple_key, argument_key, result_value, default_result):", "body": "map_ = map_objects_to_result(objects, object_key, object_tuple_key, result_value, default_result)element_count = get_request_count(kwargs)return [map_[get_argument_key(kwargs, argument_key, index)] for index in range(, element_count)]", "docstring": ":param kwargs: kwargs used to call the multiget function\n:param objects: objects returned from the inner function\n:param object_key: field or set of fields that map to the kwargs provided\n:param object_tuple_key: A temporary shortcut until we allow dot.path traversal for object_key.\nWill call getattr(getattr(result, join_table_name), object_key)\n:param argument_key: field or set of fields that map to the objects provided\n:param result_value: Limit the fields returned to this field or set of fields (none = whole object)\n:param default_result: If the inner function returned none for a set of parameters, default to this\n:return:", "id": "f2267:m7"} {"signature": "@abstractmethoddef get(self, k, d=None):", "body": "pass", "docstring": "D.get(k[,d]) -> D[k] if k in D, else d. 
d defaults to None.", "id": "f2268:c0:m0"} {"signature": "@abstractmethoddef __getitem__(self, y):", "body": "pass", "docstring": "x.__getitem__(y) <==> x[y]", "id": "f2268:c0:m1"} {"signature": "@abstractmethoddef __contains__(self, *args, **kwargs):", "body": "pass", "docstring": "True if D has a key k, else False.", "id": "f2268:c0:m6"} {"signature": "def delete(self, *args):", "body": "cache = get_cache()key = self.get_cache_key(*args)if key in cache:del cache[key]", "docstring": "Remove the key from the request cache and from memcache.", "id": "f2269:c0:m4"} {"signature": "def __call__(self, *args, **kwargs):", "body": "key = self.get_cache_key(*args, **kwargs)cache = get_cache()if key in cache:return cache[key]else:self.prime(*args, **kwargs)self._issue_gets_for_primes()return cache[key]", "docstring": "Pull the return value from the request cache if possible. If not,\npull the value from calling inner_f, then set the value in request cache", "id": "f2270:c0:m1"} {"signature": "def tag_list(self, tags):", "body": "return [(tag.name, \"\" if tag.name in tags else \"\")for tag in self.model.objects.all()]", "docstring": "Generates a list of tags identifying those previously selected.\n\nReturns a list of tuples of the form (, ).\n\nUses the string names rather than the tags themselves in order to work\nwith tag lists built from forms not fully submitted.", "id": "f2278:c0:m2"} {"signature": "def encode_datetime(obj):", "body": "zone = '' if getattr(obj, '', True) else ''return obj.isoformat() + zone", "docstring": "Encode a datetime.datetime or datetime.date object as an ISO 8601 format string.", "id": "f2282:m6"} {"signature": "def decode_date(self, val):", "body": "if isinstance(val, basestring) and val.count('') == and len(val) > :try:dt = dateutil.parser.parse(val)if val.endswith(('', '', '')):dt = dt.replace(tzinfo=None)return dtexcept (TypeError, ValueError):passreturn val", "docstring": "Tries to decode strings that look like dates into datetime objects.", "id": "f2282:c0:m2"} {"signature": "def encode_key_as_entity(obj):", "body": "return obj.get_async()", "docstring": "Get the Entity from the ndb.Key for further encoding.", "id": "f2282:m2"} {"signature": "def is_staging(version=None):", "body": "return is_host_google() and not is_default_version(version)", "docstring": "True if the app is hosted by Google (appspot.com) but the version is not the default.", "id": "f2284:m6"} {"signature": "def get_dot_target_name_safe(version=None, module=None):", "body": "version = version or get_current_version_name_safe()module = module or get_current_module_name_safe()if version and module:return ''.join((version, module))return None", "docstring": "Returns the current version/module in -dot- notation which is used by `target:` parameters.\nIf there is no current version or module then None is returned.", "id": "f2284:m11"} {"signature": "def get_dot_target_name(version=None, module=None):", "body": "version = version or get_current_version_name()module = module or get_current_module_name()return ''.join((version, module))", "docstring": "Returns the current version/module in -dot- notation which is used by `target:` parameters.", "id": "f2284:m10"} {"signature": "def get_environ_dict():", "body": "return {'': _get_os_environ_dict(('','','','','','','','','','','','','','','','','','',)),'': _get_app_identity_dict(('','','',)),'': _get_modules_dict(('','','','','','','',)),'': _get_namespace_manager_dict(('','',)),}", "docstring": "Return a dictionary of all environment keys/values.", 
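encode_datetime and decode_date above round-trip values through ISO 8601 strings: naive datetimes are tagged with a trailing zone marker on the way out, and strings that look like dates are parsed back (with the marker stripped) on the way in. A rough standalone equivalent; the exact zone suffix and the shape checks in the original are elided, so the 'Z' convention and the length test below are assumptions::

    import datetime
    import dateutil.parser

    def encode_dt(obj):
        # naive datetimes get a 'Z' marker; aware ones already carry an offset
        suffix = 'Z' if getattr(obj, 'tzinfo', None) is None else ''
        return obj.isoformat() + suffix

    def decode_dt(val):
        # only touch strings that look like ISO dates, leave everything else alone
        if isinstance(val, str) and val.count('-') == 2 and len(val) > 9:
            try:
                dt = dateutil.parser.parse(val)
                if val.endswith('Z'):
                    dt = dt.replace(tzinfo=None)   # drop the zone marker again
                return dt
            except (TypeError, ValueError):
                pass
        return val

    stamp = datetime.datetime(2020, 1, 2, 3, 4, 5)
    assert decode_dt(encode_dt(stamp)) == stamp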
"id": "f2284:m16"} {"signature": "def _get_modules_dict(keys):", "body": "return {k: getattr(modules, k)() for k in keys}", "docstring": "Return a dictionary of key/values from the modules module functions.", "id": "f2284:m14"} {"signature": "@abstractmethoddef upload(self, file_name):", "body": "", "docstring": "Uploads a file to the storage.", "id": "f2299:c1:m1"} {"signature": "def connect(self):", "body": "if self.music_folder is None:music_folder = os.path.join(os.path.expanduser(''), '')if not os.path.exists(music_folder):os.makedirs(music_folder)self.music_folder = music_folder", "docstring": "Initializes the connection attribute with the path to the user home folder's Music folder, and creates it if it doesn't exist.", "id": "f2299:c5:m1"} {"signature": "def download(self, url):", "body": "try:track = self.client.get('', url=url)except HTTPError:log.error(f\"\")returnr = requests.get(self.client.get(track.stream_url, allow_redirects=False).location, stream=True)total_size = int(r.headers[''])chunk_size = file_name = track.title + ''with open(file_name, '') as f:for data in tqdm(r.iter_content(chunk_size), desc=track.title, total=total_size / chunk_size, unit='', file=sys.stdout):f.write(data)return file_name", "docstring": "Downloads a MP3 file that is associated with the track at the URL passed.\n\n:param str url: URL of the track to be downloaded", "id": "f2299:c3:m1"} {"signature": "def connect(self):", "body": "SCOPES = ''store = file.Storage('')creds = store.get()if not creds or creds.invalid:try:flow = client.flow_from_clientsecrets('', SCOPES)except InvalidClientSecretsError:log.error('')returncreds = tools.run_flow(flow, store)self.connection = build('', '', http=creds.authorize(Http()))response = self.connection.files().list(q=\"\").execute()try:folder_id = response.get('', [])[]['']except IndexError:log.warning('')folder_metadata = {'': '', '': ''}folder = self.connection.files().create(body=folder_metadata, fields='').execute()", "docstring": "Creates connection to the Google Drive API, sets the connection attribute to make requests, and creates the Music folder if it doesn't exist.", "id": "f2299:c4:m1"} {"signature": "def cost(self, t_node, branch_length, multiplicity=):", "body": "merger_time = t_node+branch_lengthreturn self.integral_merger_rate(merger_time) - self.integral_merger_rate(t_node)- np.log(self.total_merger_rate(merger_time))*(multiplicity-)/multiplicity", "docstring": "returns the cost associated with a branch starting at t_node\nt_node is time before present, the branch goes back in time\n\nArgs:\n - t_node: time of the node\n - branch_length: branch length, determines when this branch merges with sister\n - multiplicity: 2 if merger is binary, higher if this is a polytomy", "id": "f2306:c0:m6"} {"signature": "def attach_to_tree(self):", "body": "for clade in self.tree.find_clades():if clade.up is not None:clade.branch_length_interpolator.merger_cost = self.cost", "docstring": "attaches the the merger cost to each branch length interpolator in the tree.", "id": "f2306:c0:m7"} {"signature": "def calc_branch_count(self):", "body": "self.tree_events = np.array(sorted([(n.time_before_present, len(n.clades)-)for n in self.tree.find_clades() if not n.bad_branch],key=lambda x:-x[]))from collections import defaultdictdn_branch = defaultdict(int)for (t, dn) in self.tree_events:dn_branch[t]+=dnunique_mergers = np.array(sorted(dn_branch.items(), key = lambda x:-x[]))nbranches = [[ttconf.BIG_NUMBER, ], [unique_mergers[,]+ttconf.TINY_NUMBER, ]]for ti, (t, dn) in 
enumerate(unique_mergers[:-]):new_n = nbranches[-][]+dnnext_t = unique_mergers[ti+,]+ttconf.TINY_NUMBERnbranches.append([t, new_n])nbranches.append([next_t, new_n])new_n += unique_mergers[-,]nbranches.append([next_t, new_n])nbranches.append([-ttconf.BIG_NUMBER, new_n])nbranches=np.array(nbranches)self.nbranches = interp1d(nbranches[:,], nbranches[:,], kind='')", "docstring": "calculates an interpolation object that maps time to the number of\nconcurrent branches in the tree. The result is stored in self.nbranches", "id": "f2306:c0:m2"} {"signature": "def mugration(params):", "body": "if os.path.isfile(params.states):states = pd.read_csv(params.states, sep='' if params.states[-:]=='' else '',skipinitialspace=True)else:print(\"\")return outdir = get_outdir(params, '')taxon_name = '' if '' in states.columns else states.columns[]if params.attribute:if params.attribute in states.columns:attr = params.attributeelse:print(\"\"+params.states, file=sys.stderr)print(\"\"+\"\".join(states.columns), file=sys.stderr)return else:attr = states.columns[]print(\"\"+attr, file=sys.stderr)leaf_to_attr = {x[taxon_name]:x[attr] for xi, x in states.iterrows()if x[attr]!=params.missing_data}unique_states = sorted(set(leaf_to_attr.values()))nc = len(unique_states)if nc>:print(\"\", file=sys.stderr)return elif nc<:print(\"\", file=sys.stderr)return alphabet = [chr(+i) for i,state in enumerate(unique_states)]missing_char = chr(+nc)letter_to_state = {a:unique_states[i] for i,a in enumerate(alphabet)}letter_to_state[missing_char]=params.missing_datareverse_alphabet = {v:k for k,v in letter_to_state.items()}if params.weights:params.infer_gtr = Truetmp_weights = pd.read_csv(params.weights, sep='' if params.states[-:]=='' else '',skipinitialspace=True)weights = {row[]:row[] for ri,row in tmp_weights.iterrows()}mean_weight = np.mean(list(weights.values()))weights = np.array([weights[c] if c in weights else mean_weight for c in unique_states], dtype=float)weights/=weights.sum()else:weights = np.ones(nc, dtype=float)/ncW = np.ones((nc,nc), dtype=float)mugration_GTR = GTR.custom(pi = weights, W=W, alphabet = np.array(alphabet))mugration_GTR.profile_map[missing_char] = np.ones(nc)mugration_GTR.ambiguous=missing_chartreeanc = TreeAnc(params.tree, gtr=mugration_GTR, verbose=params.verbose,convert_upper=False, one_mutation=)pseudo_seqs = [SeqRecord(id=n.name,name=n.name,seq=Seq(reverse_alphabet[leaf_to_attr[n.name]]if n.name in leaf_to_attr else missing_char))for n in treeanc.tree.get_terminals()]treeanc.aln = MultipleSeqAlignment(pseudo_seqs)ndiff = treeanc.infer_ancestral_sequences(method='', infer_gtr=True,store_compressed=False, pc=params.pc, marginal=True, normalized_rate=False,fixed_pi=weights if params.weights else None)if ndiff==ttconf.ERROR: return print(\"\"%attr,params.tree)basename = get_basename(params, outdir)gtr_name = basename + ''with open(gtr_name, '') as ofile:ofile.write('')for state in unique_states:ofile.write(''%(reverse_alphabet[state], state))ofile.write(''+str(treeanc.gtr)+'')print(\"\", gtr_name)terminal_count = for n in treeanc.tree.find_clades():if n.up is None:continuen.confidence=Noneif n.is_terminal() and len(n.name)> and bioversion<\"\":n.name = n.name[:]+''%terminal_countterminal_count+=n.comment= ''%attr + letter_to_state[n.sequence[]] +''if params.confidence:conf_name = basename+''with open(conf_name, '') as ofile:ofile.write(''+''.join(unique_states)+'')for n in treeanc.tree.find_clades():ofile.write(n.name + ''+''.join([str(x) for x in n.marginal_profile[]])+'')print(\"\", 
conf_name)outtree_name = basename+''Phylo.write(treeanc.tree, outtree_name, '')print(\"\",outtree_name)return ", "docstring": "implementing treetime mugration", "id": "f2307:m12"} {"signature": "def create_gtr(params):", "body": "model = params.gtrgtr_params = params.gtr_paramsif model == '':gtr = GTR.standard('', alphabet='' if params.aa else '')else:try:kwargs = {}if gtr_params is not None:for param in gtr_params:keyval = param.split('')if len(keyval)!=: continueif keyval[] in ['', '', '', '']:keyval[] = ''keyval[] = list(map(float, keyval[].split('')))elif keyval[] not in ['']:keyval[] = float(keyval[])kwargs[keyval[]] = keyval[]else:print (\"\")gtr = GTR.standard(model, **kwargs)infer_gtr = Falseexcept:print (\"\")gtr = GTR.standard('', alphabet='' if params.aa else '')infer_gtr = Falsereturn gtr", "docstring": "parse the arguments referring to the GTR model and return a GTR structure", "id": "f2307:m1"} {"signature": "@classmethoddef custom(cls, mu=, pi=None, W=None, **kwargs):", "body": "gtr = cls(**kwargs)gtr.assign_rates(mu=mu, pi=pi, W=W)return gtr", "docstring": "Create a GTR model by specifying the matrix explicitly\n\nParameters\n----------\n\n mu : float\n Substitution rate\n\n W : nxn matrix\n Substitution matrix\n\n pi : n vector\n Equilibrium frequencies\n\n **kwargs:\n Key word arguments to be passed\n\nKeyword Args\n------------\n\n alphabet : str\n Specify alphabet when applicable. If the alphabet specification is\n required, but no alphabet is specified, the nucleotide alphabet will be used as\n default.", "id": "f2309:c0:m4"} {"signature": "@staticmethoddef calc_fwhm(distribution, is_neg_log=True):", "body": "if isinstance(distribution, interp1d):if is_neg_log:ymin = distribution.y.min()log_prob = distribution.y-yminelse:log_prob = -np.log(distribution.y)log_prob -= log_prob.min()xvals = distribution.xelif isinstance(distribution, Distribution):xvals = distribution._func.xlog_prob = distribution._func.yelse:raise TypeError(\"\"\"\");L = xvals.shape[]tmp = np.where(log_prob < )[]x_l, x_u = tmp[], tmp[-]if L < :print (\"\")return min(TINY_NUMBER, distribution.xmax - distribution.xmin)else:return max(TINY_NUMBER, xvals[min(x_u+,L-)] - xvals[max(,x_l-)])", "docstring": "Assess the width of the probability distribution. 
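calc_fwhm above reads the width of a tabulated probability distribution straight off its grid: find the first and last grid points that lie within the half-maximum band and take their distance. A small numpy illustration on a plain (non-log) density; the half-of-peak threshold is what the name full-width-half-max implies, since the original's exact cutoff is elided::

    import numpy as np

    def fwhm(xvals, density):
        # full width at half maximum of a sampled 1D density
        density = np.asarray(density, dtype=float)
        half = density.max() / 2.0
        inside = np.where(density >= half)[0]     # grid points above the half-max level
        if len(inside) < 2:
            return 0.0
        return xvals[inside[-1]] - xvals[inside[0]]

    x = np.linspace(-5.0, 5.0, 1001)
    gauss = np.exp(-x**2 / 2.0)                   # sigma = 1
    print(fwhm(x, gauss))                         # ~2.355 == 2*sqrt(2*ln(2))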
This returns\nfull-width-half-max", "id": "f2312:c0:m0"} {"signature": "@classmethoddef delta_function(cls, x_pos, weight=, min_width=MIN_INTEGRATION_PEAK):", "body": "distribution = cls(x_pos,,is_log=True, min_width=min_width)distribution.weight = weightreturn distribution", "docstring": "Create delta function distribution.", "id": "f2312:c0:m1"} {"signature": "def prepare_tree(self):", "body": "self.tree.root.branch_length = self.tree.root.mutation_length = self.tree.root.branch_lengthself.tree.root.mutations = []self.tree.ladderize()self._prepare_nodes()self._leaves_lookup = {node.name:node for node in self.tree.get_terminals()}", "docstring": "Set link to parent and calculate distance to root for all tree nodes.\nShould be run once the tree is read and after every rerooting,\ntopology change or branch length optimizations.", "id": "f2313:c0:m21"} {"signature": "def optimize_branch_length(self, mode='', **kwargs):", "body": "self.logger(\"\"%mode,)if (self.tree is None) or (self.aln is None):self.logger(\"\", )return ttconf.ERRORstore_old_dist = Falseif '' in kwargs:store_old_dist = kwargs['']if mode=='':if not hasattr(self.tree.root, \"\"):self.infer_ancestral_sequences(marginal=True)max_bl = for node in self.tree.find_clades(order=''):if node.up is None: continue if store_old_dist:node._old_length = node.branch_lengthif mode=='':new_len = self.optimal_marginal_branch_length(node)elif mode=='':new_len = self.optimal_branch_length(node)else:self.logger(\"\",, warn=True)new_len = node.branch_lengthif new_len < :continueself.logger(\"\"\"\"%(node.branch_length, new_len, len(node.mutations)*self.one_mutation), )node.branch_length = new_lennode.mutation_length=new_lenmax_bl = max(max_bl, new_len)self.tree.root.up = Noneself.tree.root.dist2root = if max_bl> and mode=='':self.logger(\"\"\"\"\"\"\"\", , warn=True)self._prepare_nodes()return ttconf.SUCCESS", "docstring": "Perform optimization for the branch lengths of the entire tree.\nThis method only does a single path and needs to be iterated.\n\n**Note** this method assumes that each node stores information\nabout its sequence as numpy.array object (node.sequence attribute).\nTherefore, before calling this method, sequence reconstruction with\neither of the available models must be performed.\n\nParameters\n----------\n\n mode : str\n Optimize branch length assuming the joint ML sequence assignment\n of both ends of the branch (:code:`joint`), or trace over all possible sequence\n assignments on both ends of the branch (:code:`marginal`) (slower, experimental).\n\n **kwargs :\n Keyword arguments\n\nKeyword Args\n------------\n\n verbose : int\n Output level\n\n store_old : bool\n If True, the old lengths will be saved in :code:`node._old_dist` attribute.\n Useful for testing, and special post-processing.", "id": "f2313:c0:m43"} {"signature": "def process_alignment_dict(self):", "body": "nseq = len(self.aln)inv_map = defaultdict(list)for k,v in self.aln.items():for pos, bs in v.items():inv_map[pos].append(bs)self.nonref_positions = np.sort(list(inv_map.keys()))self.inferred_const_sites = []ambiguous_char = self.gtr.ambiguousnonref_const = []nonref_alleles = []ambiguous_const = []variable_pos = []for pos, bs in inv_map.items(): bases = \"\".join(np.unique(bs))if len(bs) == nseq:if (len(bases)<= and ambiguous_char in bases) or len(bases)==:nonref_const.append(pos)nonref_alleles.append(bases.replace(ambiguous_char, ''))if ambiguous_char in bases: self.inferred_const_sites.append(pos)else:variable_pos.append(pos)else:if 
bases==ambiguous_char:ambiguous_const.append(pos)self.inferred_const_sites.append(pos) else:variable_pos.append(pos)refMod = np.array(list(self.ref))refMod[nonref_const] = nonref_allelesstates = self.gtr.alphabetrefMod[variable_pos] = ''reduced_alignment_const = []alignment_patterns_const = {}for base in states:p = base*nseqpos = list(np.where(refMod==base)[])if len(pos):alignment_patterns_const[p] = [len(reduced_alignment_const), pos]reduced_alignment_const.append(list(p))return reduced_alignment_const, alignment_patterns_const, variable_pos", "docstring": "prepare the dictionary specifying differences from a reference sequence\nto construct the reduced alignment with variable sites only. NOTE:\n - sites can be constant but different from the reference\n - sites can be constant plus a ambiguous sites\n\nassigns\n-------\n- self.nonref_positions: at least one sequence is different from ref\n\nReturns\n-------\nreduced_alignment_const\n reduced alignment accounting for non-variable postitions\n\nalignment_patterns_const\n dict pattern -> (pos in reduced alignment, list of pos in full alignment)\n\nariable_positions\n list of variable positions needed to construct remaining", "id": "f2313:c0:m20"} {"signature": "def _fitch_state(self, node, pos):", "body": "state = self._fitch_intersect([k.state[pos] for k in node.clades])if len(state) == :state = np.concatenate([k.state[pos] for k in node.clades])return state", "docstring": "Determine the Fitch profile for a single character of the node's sequence.\nThe profile is essentially the intersection between the children's\nprofiles or, if the former is empty, the union of the profiles.\n\nParameters\n----------\n\n node : PhyloTree.Clade:\n Internal node which the profiles are to be determined\n\n pos : int\n Position in the node's sequence which the profiles should\n be determinedf for.\n\nReturns\n-------\n state : numpy.array\n Fitch profile for the character at position pos of the given node.", "id": "f2313:c0:m33"} {"signature": "@propertydef leaves_lookup(self):", "body": "return self._leaves_lookup", "docstring": "The :code:`{leaf-name:leaf-node}` dictionary. It enables fast\nsearch of a tree leaf object by its name.", "id": "f2313:c0:m2"} {"signature": "def _fitch_intersect(self, arrays):", "body": "def pairwise_intersect(arr1, arr2):s2 = set(arr2)b3 = [val for val in arr1 if val in s2]return b3arrays = list(arrays) N = len(arrays)while N > :arr1 = arrays.pop()arr2 = arrays.pop()arr = pairwise_intersect(arr1, arr2)arrays.append(arr)N = len(arrays)return arrays[]", "docstring": "Find the intersection of any number of 1D arrays.\nReturn the sorted, unique values that are in all of the input arrays.\nAdapted from numpy.lib.arraysetops.intersect1d", "id": "f2313:c0:m34"} {"signature": "def dict_sequence(self, node, keep_var_ambigs=False):", "body": "seq = {}node_seq = node.cseqif keep_var_ambigs and hasattr(node, \"\") and node.is_terminal():node_seq = node.original_cseqfor pos in self.nonref_positions:cseqLoc = self.full_to_reduced_sequence_map[pos]base = node_seq[cseqLoc]if self.ref[pos] != base:seq[pos] = basereturn seq", "docstring": "For VCF-based TreeAnc objects, we do not want to store the entire\nsequence on every node, as they could be large. Instead, this returns the dict\nof variants & their positions for this sequence. This is used in place of\n:py:meth:`treetime.TreeAnc.expanded_sequence` for VCF-based objects throughout TreeAnc. 
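dict_sequence above is the VCF-friendly representation: instead of carrying a full sequence on every node, only the positions that differ from the reference are kept. A toy version of that representation and its expansion back into a full string (the function names here are illustrative, not the TreeAnc API)::

    def to_variant_dict(ref, seq):
        # keep only the positions where seq disagrees with the reference
        return {i: b for i, (r, b) in enumerate(zip(ref, seq)) if r != b}

    def expand(ref, variants):
        # apply the stored variants back onto a copy of the reference
        full = list(ref)
        for pos, base in variants.items():
            full[pos] = base
        return "".join(full)

    ref = "ACGTACGT"
    seq = "ACGAACGG"
    variants = to_variant_dict(ref, seq)   # {3: 'A', 7: 'G'}
    assert expand(ref, variants) == seq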
However, users can still\ncall :py:meth:`expanded_sequence` if they require the full sequence.\n\nParameters\n----------\n node : PhyloTree.Clade\n Tree node\n\nReturns\n-------\n seq : dict\n dict where keys are the basepair position (numbering from 0) and value is the variant call", "id": "f2313:c0:m31"} {"signature": "@propertydef gtr(self):", "body": "return self._gtr", "docstring": "The current GTR object.\n\n:setter: Sets the GTR object passed in\n:getter: Returns the current GTR object", "id": "f2313:c0:m3"} {"signature": "@propertydef ref(self):", "body": "return self._ref", "docstring": "Get the str reference nucleotide sequence currently used by TreeAnc.\nWhen having read alignment in from a VCF, this is what variants map to.\n\n:setter: Sets the string reference sequence\n:getter: Returns the string reference sequence", "id": "f2313:c0:m14"} {"signature": "def _store_compressed_sequence_pairs(self):", "body": "self.logger(\"\",)for node in self.tree.find_clades():if node.up is None:continueself._store_compressed_sequence_to_node(node)self.logger(\"\",)", "docstring": "Traverse the tree, and for each node store the compressed sequence pair.\n**Note** sequence reconstruction should be performed prior to calling\nthis method.", "id": "f2313:c0:m41"} {"signature": "def prune_short_branches(self):", "body": "self.logger(\"\", )for node in self.tree.find_clades():if node.up is None or node.is_terminal():continueif self.gtr.prob_t(node.up.cseq, node.cseq, ,pattern_multiplicity=self.multiplicity) > :node.up.clades = [k for k in node.up.clades if k != node] + node.cladesfor clade in node.clades:clade.up = node.up", "docstring": "If the branch length is less than the minimal value, remove the branch\nfrom the tree. **Requires** ancestral sequence reconstruction", "id": "f2313:c0:m48"} {"signature": "def expanded_sequence(self, node, include_additional_constant_sites=False):", "body": "if include_additional_constant_sites:L = self.seq_lenelse:L = self.seq_len - self.additional_constant_sitesreturn node.cseq[self.full_to_reduced_sequence_map[:L]]", "docstring": "Expand a nodes compressed sequence into the real sequence\n\nParameters\n----------\nnode : PhyloTree.Clade\n Tree node\n\nReturns\n-------\nseq : np.array\n Sequence as np.array of chars", "id": "f2313:c0:m30"} {"signature": "def date_uncertainty_due_to_rate(self, node, interval=(, )):", "body": "if hasattr(node, \"\"):from scipy.special import erfinvnsig = [np.sqrt()*erfinv(- + *x) if x*(-x) else for x in interval]l,c,u = [x[] for x in node.numdate_rate_variation]return np.array([c + x*np.abs(y-c) for x,y in zip(nsig, (l,u))])else:return None", "docstring": "use previously calculated variation of the rate to estimate\n the uncertainty in a particular numdate due to rate variation.\n\n Parameters\n ----------\n node : PhyloTree.Clade\n node for which the confidence interval is to be calculated\n interval : tuple, optional\n Array of length two, or tuple, defining the bounds of the confidence interval", "id": "f2314:c0:m15"} {"signature": "def timetree_likelihood(self):", "body": "LH = for node in self.tree.find_clades(order=''): if node.up is None: continueLH -= node.branch_length_interpolator(node.branch_length)if self.aln:LH += self.gtr.sequence_logLH(self.tree.root.cseq, pattern_multiplicity=self.multiplicity)return LH", "docstring": "Return the likelihood of the data given the current branch length in the tree", "id": "f2314:c0:m10"} {"signature": "def get_confidence_interval(self, node, interval = (, )):", "body": 
"rate_contribution = self.date_uncertainty_due_to_rate(node, interval)if hasattr(node, \"\"):min_date, max_date = [self.date2dist.to_numdate(x) for x in(node.marginal_pos_LH.xmax, node.marginal_pos_LH.xmin)]if node.marginal_inverse_cdf==\"\":return np.array([node.numdate, node.numdate])else:mutation_contribution = self.date2dist.to_numdate(node.marginal_inverse_cdf(np.array(interval))[::-])else:min_date, max_date = [-np.inf, np.inf]return self.combine_confidence(node.numdate, (min_date, max_date),c1=rate_contribution, c2=mutation_contribution)", "docstring": "If temporal reconstruction was done using the marginal ML mode, the entire distribution of\ntimes is available. This function determines the 90% (or other) confidence interval, defined as the\nrange where 5% of probability is below and above. Note that this does not necessarily contain\nthe highest probability position.\nIn absense of marginal reconstruction, it will return uncertainty based on rate\nvariation. If both are present, the wider interval will be returned.\n\nParameters\n----------\n\n node : PhyloTree.Clade\n The node for which the confidence interval is to be calculated\n\n interval : tuple, list\n Array of length two, or tuple, defining the bounds of the confidence interval\n\nReturns\n-------\n\n confidence_interval : numpy array\n Array with two numerical dates delineating the confidence interval", "id": "f2314:c0:m17"} {"signature": "def _assign_dates(self):", "body": "if self.tree is None:self.logger(\"\", )return ttconf.ERRORbad_branch_counter = for node in self.tree.find_clades(order=''):if node.name in self.date_dict:tmp_date = self.date_dict[node.name]if np.isscalar(tmp_date) and np.isnan(tmp_date):self.logger(\"\"%(node.name, str(tmp_date)), , warn=True)node.raw_date_constraint = Nonenode.bad_branch = Trueelse:try:tmp = np.mean(tmp_date)node.raw_date_constraint = tmp_datenode.bad_branch = Falseexcept:self.logger(\"\"%(node.name, str(tmp_date)), , warn=True)node.raw_date_constraint = Nonenode.bad_branch = Trueelse: node.raw_date_constraint = Noneif node.is_terminal():node.bad_branch = Trueelse:node.bad_branch = np.all([x.bad_branch for x in node])if node.is_terminal() and node.bad_branch:bad_branch_counter += if bad_branch_counter>self.tree.count_terminals()-:self.logger(\"\", , warn=True)return ttconf.ERRORreturn ttconf.SUCCESS", "docstring": "assign dates to nodes\n\n Returns\n -------\n str\n success/error code", "id": "f2314:c0:m1"} {"signature": "def explained_variance(self):", "body": "self.tree.root._v=for n in self.tree.get_nonterminals(order=''):for c in n:c._v = n._v + self.branch_value(c)raw = np.array([(self.tip_value(n), n._v) for n in self.tree.get_terminals()if self.tip_value(n) is not None])return np.corrcoef(raw.T)[,]", "docstring": "calculate standard explained variance\n\n Returns\n -------\n float\n r-value of the root-to-tip distance and time.\n independent of regression model, but dependent on root choice", "id": "f2315:c0:m6"} {"signature": "def recurse(self, full_matrix=False):", "body": "for n in self.tree.get_nonterminals(order=''):n_leaves = len(n._ii)if full_matrix: M = np.zeros((n_leaves, n_leaves), dtype=float)r = np.zeros(n_leaves, dtype=float)c_count = for c in n:ssq = self.branch_variance(c)nc = len(c._ii)if c.is_terminal():if full_matrix:M[c_count, c_count] = /ssqr[c_count] = /ssqelse:if full_matrix:M[c_count:c_count+nc, c_count:c_count+nc] = c.cinv - ssq*np.outer(c.r,c.r)/(+ssq*c.s)r[c_count:c_count+nc] = c.r/(+ssq*c.s)c_count += ncif full_matrix: n.cinv = Mn.r = r n.s = 
n.r.sum()", "docstring": "recursion to calculate inverse covariance matrix\n\nParameters\n----------\nfull_matrix : bool, optional\n if True, the entire inverse matrix is calculated. otherwise, only the weighing vector.", "id": "f2315:c0:m3"} {"signature": "def __init__(self, tree_in, tip_value = None,branch_value = None, branch_variance = None):", "body": "super(TreeRegression, self).__init__()self.tree = tree_infor li, l in enumerate(self.tree.get_terminals()):l._ii = np.array([li])total_bl = for n in self.tree.get_nonterminals(order=''):n._ii = np.concatenate([c._ii for c in n])n._ii.sort()for c in n:c.up=ntotal_bl+=c.branch_lengthself.tree.root.up=Noneself.N = self.tree.root._ii.shape[]if tip_value is None:self.tip_value = lambda x:np.mean(x.numdate) if x.is_terminal() else Noneelse:self.tip_value = tip_valueif branch_value is None:self.branch_value = lambda x:x.branch_lengthelse:self.branch_value = branch_valueif branch_variance is None:self.branch_variance = lambda x:x.branch_length + *total_bl/self.Nelse:self.branch_variance = branch_variance", "docstring": "Parameters\n----------\n T : (Bio.Phylo.tree)\n Tree for which the covariances and regression\n are to be calculated.\n\n tip_value : (callable)\n function that for each tip returns the value to\n be used in the regression.\n\n branch_value : (callable)\n function that for each node of the tree returns\n the contribution of this branch to the value of\n the subtending tips.\n\n variance_function : (callable)\n function that for each node of the tree returns\n the accumulated variance", "id": "f2315:c0:m0"} {"signature": "def optimal_reroot(self, force_positive=True, slope=None):", "body": "best_root = self.find_best_root(force_positive=force_positive, slope=slope)best_node = best_root[\"\"]x = best_root[\"\"]if x<:new_node = best_nodeelif x>-:new_node = best_node.upelse:new_node = Phylo.BaseTree.Clade()new_node.branch_length = best_node.branch_length*(-x)new_node.up = best_node.upnew_node.clades = [best_node]new_node.up.clades = [k if k!=best_node else new_nodefor k in best_node.up.clades]best_node.branch_length *= xbest_node.up = new_nodenew_node.rtt_regression = best_rootself.tree.root_with_outgroup(new_node)self.tree.ladderize()for n in self.tree.get_nonterminals(order=''):for c in n:c.up=nreturn best_root", "docstring": "determine the best root and reroot the tree to this value.\nNote that this can change the parent child relations of the tree\nand values associated with branches rather than nodes\n(e.g. confidence) might need to be re-evaluated afterwards\n\nParameters\n----------\nforce_positive : bool, optional\n if True, the search for a root will only consider positive rate estimates\n\nslope : float, optional\n if given, it will find the optimal root given a fixed rate. 
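The TreeRegression machinery above ultimately regresses tip values (sampling dates) against accumulated branch values (root-to-tip divergence); the slope plays the role of the clock rate and the correlation coefficient is the usual clock-signal diagnostic. Stripped of the tree-aware covariance weighting, the bare least-squares version looks like this (the numbers are made-up toy data)::

    import numpy as np

    dates = np.array([2000.1, 2003.5, 2007.2, 2010.9, 2015.4])   # tip values
    divergence = np.array([0.010, 0.017, 0.026, 0.033, 0.041])   # root-to-tip distances

    slope, intercept = np.polyfit(dates, divergence, 1)   # clock rate and intercept
    r_value = np.corrcoef(dates, divergence)[0, 1]
    root_date = -intercept / slope                         # where the fit crosses zero divergence

    print(f"rate={slope:.2e}/year  r={r_value:.3f}  inferred root date ~{root_date:.1f}")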
If slope==0, this\n corresponds to minimal root-to-tip variance rooting (min_dev)\n\nReturns\n-------\ndict\n regression parameters", "id": "f2315:c0:m10"} {"signature": "def Cov(self):", "body": "M = np.zeros((self.N, self.N))for n in self.tree.find_clades():if n == self.tree.root:continueM[np.meshgrid(n._ii, n._ii)] += self.branch_variance(n)return M", "docstring": "calculate the covariance matrix of the tips assuming variance\nhas accumulated along branches of the tree accoriding to the\nthe provided\nReturns\n-------\n\n M : (np.array)\n covariance matrix with tips arranged standard transersal order.", "id": "f2315:c0:m1"} {"signature": "def CovInv(self):", "body": "self.recurse(full_matrix=True)return self.tree.root.cinv", "docstring": "Inverse of the covariance matrix\n\nReturns\n-------\n\n H : (np.array)\n inverse of the covariance matrix.", "id": "f2315:c0:m2"} {"signature": "def regression(self, slope=None):", "body": "self._calculate_averages()clock_model = base_regression(self.tree.root.Q, slope)clock_model[''] = self.explained_variance()return clock_model", "docstring": "regress tip values against branch values\n\n Parameters\n ----------\n slope : None, optional\n if given, the slope isn't optimized\n\n Returns\n -------\n dict\n regression parameters", "id": "f2315:c0:m7"} {"signature": "def numdate_from_dist2root(self, d2r):", "body": "return (d2r-self.intercept)/self.clock_rate", "docstring": "estimate the numerical date based on the distance to root.\n-> crude dating of internal nodes", "id": "f2316:c0:m7"} {"signature": "def to_years(self, abs_t):", "body": "return abs_t / abs(self.clock_rate)", "docstring": "Convert the time before present measured in branch length units to years", "id": "f2316:c0:m5"} {"signature": "def get_time_before_present(self, numdate):", "body": "return (numeric_date() - numdate) * abs(self.clock_rate)", "docstring": "Convert the numeric date to the branch-len scale", "id": "f2316:c0:m4"} {"signature": "def F81(mu=, pi=None, alphabet=\"\", **kwargs):", "body": "if pi is None:pi=*np.ones(, dtype=float)num_chars = len(alphabets[alphabet])pi = np.array(pi, dtype=float)if num_chars != len(pi) :pi = np.ones((num_chars, ), dtype=float)print (\"\"\"\")W = np.ones((num_chars,num_chars))pi /= ( * np.sum(pi))gtr = GTR(alphabet=alphabets[alphabet])gtr.assign_rates(mu=mu, pi=pi, W=W)return gtr", "docstring": "Felsenstein 1981 model. Assumes non-equal concentrations across nucleotides,\nbut the transition rate between all states is assumed to be equal. See\nFelsenstein (1981), J. Mol. Evol. 17 (6): 368\u2013376. doi:10.1007/BF01734359\nfor details.\n\nCurrent implementation of the model does not account for the gaps (treatment of\ngaps as characters is possible if specify alphabet='nuc_gap').\n\nParameters\n-----------\n\n\n mu : float\n Substitution rate\n\n pi : numpy.array\n Nucleotide concentrations\n\n alphabet : str\n Alphabet to use. POsiible values are: ['nuc', 'nuc_gap'] Default 'nuc', which discounts al gaps.\n 'nuc_gap' alphabet enables treatmen of gaps as characters.", "id": "f2317:m2"} {"signature": "def K80(mu=, kappa=, **kwargs):", "body": "num_chars = len(alphabets[''])pi = np.ones(len(alphabets['']), dtype=float)/len(alphabets[''])W = _create_transversion_transition_W(kappa)gtr = GTR(alphabet=alphabets[''])gtr.assign_rates(mu=mu, pi=pi, W=W)return gtr", "docstring": "Kimura 1980 model. Assumes equal concentrations across nucleotides, but\nallows different rates between transitions and transversions. 
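F81 as described above keeps all exchange rates equal and lets the equilibrium frequencies differ, which pins down the whole rate matrix up to the overall rate mu. A compact numpy construction of the textbook parameterisation (a sketch, not the GTR class's internal convention)::

    import numpy as np

    def f81_rate_matrix(pi, mu=1.0):
        # off-diagonal rate into state j is proportional to its equilibrium frequency pi_j
        pi = np.asarray(pi, dtype=float)
        pi = pi / pi.sum()
        n = len(pi)
        Q = np.tile(pi, (n, 1))
        np.fill_diagonal(Q, 0.0)
        np.fill_diagonal(Q, -Q.sum(axis=1))    # rows sum to zero
        rate = -np.diag(Q) @ pi                # average substitution rate at equilibrium
        return mu * Q / rate

    Q = f81_rate_matrix([0.3, 0.2, 0.2, 0.3], mu=1.0)
    print(np.allclose(Q.sum(axis=1), 0.0))     # True: a proper rate matrix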
The ratio\nof the transversion/transition rates is given by kappa parameter.\nFor more info, see\nKimura (1980), J. Mol. Evol. 16 (2): 111\u2013120. doi:10.1007/BF01731581.\n\nCurrent implementation of the model does not account for the gaps.\n\nParameters\n-----------\n\n mu : float\n Overall substitution rate\n\n kappa : float\n Ratio of transversion/transition rates", "id": "f2317:m1"} {"signature": "def HKY85(mu=, pi=None, kappa=, **kwargs):", "body": "if pi is None:pi=*np.ones(, dtype=float)num_chars = len(alphabets[''])if num_chars != pi.shape[] :pi = np.ones((num_chars, ), dtype=float)print (\"\"\"\")W = _create_transversion_transition_W(kappa)pi /= pi.sum()gtr = GTR(alphabet=alphabets[''])gtr.assign_rates(mu=mu, pi=pi, W=W)return gtr", "docstring": "Hasegawa, Kishino and Yano 1985 model. Allows different concentrations of the\nnucleotides (as in F81) + distinguishes between transition/transversionsubstitutions\n(similar to K80). Link:\nHasegawa, Kishino, Yano (1985), J. Mol. Evol. 22 (2): 160\u2013174. doi:10.1007/BF02101694\n\nCurrent implementation of the model does not account for the gaps\n\nParameters\n-----------\n\n\n mu : float\n Substitution rate\n\n pi : numpy.array\n Nucleotide concentrations\n\n kappa : float\n Ratio of transversion/transition substitution rates", "id": "f2317:m3"} {"signature": "def optimal_t(self, seq_p, seq_ch, pattern_multiplicity=None, ignore_gaps=False):", "body": "seq_pair, multiplicity = self.compress_sequence_pair(seq_p, seq_ch,pattern_multiplicity = pattern_multiplicity,ignore_gaps=ignore_gaps)return self.optimal_t_compressed(seq_pair, multiplicity)", "docstring": "Find the optimal distance between the two sequences\n\nParameters\n----------\n\n seq_p : character array\n Parent sequence\n\n seq_c : character array\n Child sequence\n\n pattern_multiplicity : numpy array\n If sequences are reduced by combining identical alignment patterns,\n these multplicities need to be accounted for when counting the number\n of mutations across a branch. If None, all pattern are assumed to\n occur exactly once.\n\n ignore_gaps : bool\n If True, ignore gaps in distance calculations", "id": "f2318:c0:m15"} {"signature": "def _eig_sym(self):", "body": "tmpp = np.sqrt(self.Pi)symQ = self.W*np.outer(tmpp, tmpp)eigvals, eigvecs = np.linalg.eigh(symQ)tmp_v = eigvecs.T*tmppone_norm = np.sum(np.abs(tmp_v), axis=)self.v = tmp_v.T/one_normself.v_inv = (eigvecs*one_norm).T/tmppself.eigenvals = eigvals", "docstring": "Perform eigendecompositon of the rate matrix and stores the left- and right-\nmatrices to convert the sequence profiles to the GTR matrix eigenspace\nand hence to speed-up the computations.", "id": "f2318:c0:m11"} {"signature": "def prob_t_compressed(self, seq_pair, multiplicity, t, return_log=False):", "body": "if t<:logP = -ttconf.BIG_NUMBERelse:tmp_eQT = self.expQt(t)bad_indices=(tmp_eQT==)logQt = np.log(tmp_eQT + ttconf.TINY_NUMBER*(bad_indices))logQt[np.isnan(logQt) | np.isinf(logQt) | bad_indices] = -ttconf.BIG_NUMBERlogP = np.sum(logQt[seq_pair[:,], seq_pair[:,]]*multiplicity)return logP if return_log else np.exp(logP)", "docstring": "Calculate the probability of observing a sequence pair at a distance t,\nfor compressed sequences\n\nParameters\n----------\n\n seq_pair : numpy array\n :code:`np.array([(0,1), (2,2), ()..])` as indicies of\n pairs of aligned positions. (e.g. 
'A'==0, 'C'==1 etc).\n This only lists all occuring parent-child state pairs, order is irrelevant\n\n multiplicity : numpy array\n The number of times a parent-child state pair is observed.\n This allows compression of the sequence representation\n\n t : float\n Length of the branch separating parent and child\n\n return_log : bool\n Whether or not to exponentiate the result", "id": "f2318:c0:m13"} {"signature": "def __str__(self):", "body": "multi_site = len(self.Pi.shape)==if multi_site:eq_freq_str = \"\"+str(np.round(self.average_rate,))+''else:eq_freq_str = \"\"+str(np.round(self.mu,))+''if not multi_site:eq_freq_str += \"\"for a,p in zip(self.alphabet, self.Pi):eq_freq_str+=''+str(a)+''+str(np.round(p,))+''W_str = \"\"W_str+=''+''.join(map(str, self.alphabet))+''for a,Wi in zip(self.alphabet, self.W):W_str+= ''+str(a)+''+''.join([str(np.round(max(,p),)) for p in Wi])+''if not multi_site:Q_str = \"\"Q_str+=''+''.join(map(str, self.alphabet))+''for a,Qi in zip(self.alphabet, self.Q):Q_str+= ''+str(a)+''+''.join([str(np.round(max(,p),)) for p in Qi])+''return eq_freq_str + W_str + Q_str", "docstring": "String representation of the GTR model for pretty printing", "id": "f2318:c0:m3"} {"signature": "def _check_fix_Q(self, fixed_mu=False):", "body": "self.Pi /= self.Pi.sum() self.W += self.break_degen + self.break_degen.Tnp.fill_diagonal(self.W, )Wdiag = -(self.Q).sum(axis=)/self.Pinp.fill_diagonal(self.W, Wdiag)scale_factor = -np.sum(np.diagonal(self.Q)*self.Pi)self.W /= scale_factorif not fixed_mu:self.mu *= scale_factorif (self.Q.sum(axis=) < ).sum() < self.alphabet.shape[]: print (\"\", self.Q.sum(axis=))import ipdb; ipdb.set_trace()raise ArithmeticError(\"\")", "docstring": "Check the main diagonal of Q and fix it in case it does not corresond\nthe definition of the rate matrix. Should be run every time when creating\ncustom GTR model.", "id": "f2318:c0:m9"} {"signature": "def prob_t(self, seq_p, seq_ch, t, pattern_multiplicity = None,return_log=False, ignore_gaps=True):", "body": "seq_pair, multiplicity = self.compress_sequence_pair(seq_p, seq_ch,pattern_multiplicity=pattern_multiplicity, ignore_gaps=ignore_gaps)return self.prob_t_compressed(seq_pair, multiplicity, t, return_log=return_log)", "docstring": "Compute the probability to observe seq_ch (child sequence) after time t starting from seq_p\n(parent sequence).\n\nParameters\n----------\n\n seq_p : character array\n Parent sequence\n\n seq_c : character array\n Child sequence\n\n t : double\n Time (branch len) separating the profiles.\n\n pattern_multiplicity : numpy array\n If sequences are reduced by combining identical alignment patterns,\n these multplicities need to be accounted for when counting the number\n of mutations across a branch. 
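prob_t_compressed above turns a branch likelihood into a weighted sum: propagate the rate matrix over the branch length t, look up the transition probability for every distinct (parent state, child state) pair, and weight each log-probability by how many alignment columns show that pair. A direct sketch using scipy's matrix exponential on a toy two-state model (not the eigendecomposition shortcut the class itself uses)::

    import numpy as np
    from scipy.linalg import expm

    def branch_log_likelihood(Q, pairs, multiplicity, t):
        # sum of per-pair log transition probabilities, weighted by pair counts
        P = expm(Q * t)                          # transition matrix over time t
        logP = np.log(np.maximum(P, 1e-300))     # guard against log(0)
        parent, child = pairs[:, 0], pairs[:, 1]
        return float(np.sum(logP[parent, child] * multiplicity))

    Q = np.array([[-1.0,  1.0],
                  [ 1.0, -1.0]])                 # symmetric two-state rate matrix
    pairs = np.array([[0, 0], [0, 1]])           # (parent, child) state index pairs
    multiplicity = np.array([90.0, 10.0])        # 90 unchanged columns, 10 changed

    for t in (0.01, 0.05, 0.1, 0.5):
        print(t, round(branch_log_likelihood(Q, pairs, multiplicity, t), 3))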
If None, all pattern are assumed to\n occur exactly once.\n\n return_log : bool\n It True, return log-probability.\n\nReturns\n-------\n prob : np.array\n Resulting probability", "id": "f2318:c0:m14"} {"signature": "def expQt(self, t):", "body": "eLambdaT = np.diag(self._exp_lt(t)) Qs = self.v.dot(eLambdaT.dot(self.v_inv)) return np.maximum(,Qs)", "docstring": "Parameters\n----------\n\n t : float\n Time to propagate\n\nReturns\n--------\n\n expQt : numpy.array\n Matrix exponential of exo(Qt)", "id": "f2318:c0:m21"} {"signature": "def run(self, root=None, infer_gtr=True, relaxed_clock=None, n_iqd = None,resolve_polytomies=True, max_iter=, Tc=None, fixed_clock_rate=None,time_marginal=False, sequence_marginal=False, branch_length_mode='',vary_rate=False, use_covariation=False, **kwargs):", "body": "self.use_covariation = use_covariationif (self.tree is None) or (self.aln is None and self.seq_len is None):self.logger(\"\", )return ttconf.ERRORif (self.aln is None):branch_length_mode=''self._set_branch_length_mode(branch_length_mode)seq_kwargs = {\"\":sequence_marginal or (self.branch_length_mode==''),\"\":\"\"}tt_kwargs = {'':fixed_clock_rate, '':False}tt_kwargs.update(kwargs)seq_LH = if \"\" in kwargs:seq_kwargs[\"\"] = kwargs[\"\"]if \"\" in kwargs:time_marginal=kwargs[\"\"]if self.branch_length_mode=='':if self.aln:self.infer_ancestral_sequences(infer_gtr=infer_gtr, **seq_kwargs)self.prune_short_branches()else:self.optimize_sequences_and_branch_length(infer_gtr=infer_gtr,max_iter=, prune_short=True, **seq_kwargs)avg_root_to_tip = np.mean([x.dist2root for x in self.tree.get_terminals()])if n_iqd or root=='':if \"\" in kwargs and kwargs[\"\"]:plot_rtt=Trueelse:plot_rtt=Falsereroot_mechanism = '' if root=='' else rootif self.clock_filter(reroot=reroot_mechanism, n_iqd=n_iqd, plot=plot_rtt)==ttconf.ERROR:return ttconf.ERRORelif root is not None:if self.reroot(root=root)==ttconf.ERROR:return ttconf.ERRORif self.branch_length_mode=='':if self.aln:self.infer_ancestral_sequences(**seq_kwargs)else:self.optimize_sequences_and_branch_length(max_iter=, prune_short=False,**seq_kwargs)self.logger(\"\",)self.make_time_tree(**tt_kwargs)if self.aln:seq_LH = self.tree.sequence_marginal_LH if seq_kwargs[''] else self.tree.sequence_joint_LHself.LH =[[seq_LH, self.tree.positional_joint_LH, ]]if root is not None and max_iter:if self.reroot(root='' if root=='' else root)==ttconf.ERROR:return ttconf.ERRORniter = ndiff = need_new_time_tree=Falsewhile niter < max_iter:self.logger(\"\"%(niter+,max_iter),)if Tc:if Tc=='' and niter:tmpTc=''else:tmpTc=Tcself.add_coalescent_model(tmpTc, **kwargs)need_new_time_tree = Trueif relaxed_clock:print(\"\", relaxed_clock)self.relaxed_clock(**relaxed_clock)need_new_time_tree = Truen_resolved=if resolve_polytomies:n_resolved = self.resolve_polytomies()if n_resolved:self.prepare_tree()if self.branch_length_mode!='': self.optimize_sequences_and_branch_length(prune_short=False,max_iter=, **seq_kwargs)need_new_time_tree = Trueif need_new_time_tree:self.make_time_tree(**tt_kwargs)if self.aln:ndiff = self.infer_ancestral_sequences('',**seq_kwargs)else: if self.aln:ndiff = self.infer_ancestral_sequences('',**seq_kwargs)self.make_time_tree(**tt_kwargs)self.tree.coalescent_joint_LH = self.merger_model.total_LH() if Tc else if self.aln:seq_LH = self.tree.sequence_marginal_LH if seq_kwargs[''] else self.tree.sequence_joint_LHself.LH.append([seq_LH, self.tree.positional_joint_LH, self.tree.coalescent_joint_LH])niter+=if ndiff== and n_resolved== and Tc!='':self.logger(\"\",)breakif 
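The _eig_sym and expQt records above rely on diagonalising the rate matrix once and then propagating it for any branch length t. A compact sketch of that idea (a plain eigendecomposition; the symmetrised decomposition described in _eig_sym is a stability/speed refinement) could be:

import numpy as np

def make_propagator(Q):
    # One-off eigendecomposition of the rate matrix Q.
    eigvals, v = np.linalg.eig(Q)
    v_inv = np.linalg.inv(v)
    def expQt(t):
        # exp(Qt) = v . diag(exp(lambda * t)) . v_inv; for a reversible Q the
        # result is real up to round-off, so drop the imaginary part and clip
        # tiny negative entries at zero.
        M = (v * np.exp(eigvals * t)).dot(v_inv)
        return np.maximum(0.0, M.real)
    return expQt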
vary_rate:if type(vary_rate)==float:res = self.calc_rate_susceptibility(rate_std=vary_rate, params=tt_kwargs)elif self.clock_model['']:res = self.calc_rate_susceptibility(params=tt_kwargs)else:res = ttconf.ERRORif res==ttconf.ERROR:self.logger(\"\", , warn=True)if time_marginal:self.logger(\"\", )tt_kwargs['']=time_marginalself.make_time_tree(**tt_kwargs)bad_branches =[n for n in self.tree.get_terminals()if n.bad_branch and n.raw_date_constraint]if bad_branches:self.logger(\"\"\"\",,warn=True)for n in bad_branches:self.logger(\"\"%(n.name, str(n.raw_date_constraint), n.numdate),,warn=True)return ttconf.SUCCESS", "docstring": "Run TreeTime reconstruction. Based on the input parameters, it divides\nthe analysis into semi-independent jobs and conquers them one-by-one,\ngradually optimizing the tree given the temporal constarints and leaf\nnode sequences.\n\nParameters\n----------\nroot : str\n Try to find better root position on a given tree. If string is passed,\n the root will be searched according to the specified method. If none,\n use tree as-is.\n\n See :py:meth:`treetime.TreeTime.reroot` for available rooting methods.\n\ninfer_gtr : bool\n If True, infer GTR model\n\nrelaxed_clock : dic\n If not None, use autocorrelated molecular clock model. Specify the\n clock parameters as :code:`{slack:, coupling:}` dictionary.\n\nn_iqd : int\n If not None, filter tree nodes which do not obey the molecular clock\n for the particular tree. The nodes, which deviate more than\n :code:`n_iqd` interquantile intervals from the molecular clock\n regression will be marked as 'BAD' and not used in the TreeTime\n analysis\n\nresolve_polytomies : bool\n If True, attempt to resolve multiple mergers\n\nmax_iter : int\n Maximum number of iterations to optimize the tree\n\nTc : float, str\n If not None, use coalescent model to correct the branch lengths by\n introducing merger costs.\n\n If Tc is float, it is interpreted as the coalescence time scale\n\n If Tc is str, it should be one of (:code:`opt`, :code:`const`, :code:`skyline`)\n\nfixed_clock_rate : float\n Fixed clock rate to be used. 
If None, infer clock rate from the molecular clock.\n\ntime_marginal : bool\n If True, perform a final round of marginal reconstruction of the node's positions.\n\nsequence_marginal : bool, optional\n use marginal reconstruction for ancestral sequences\n\nbranch_length_mode : str\n Should be one of: :code:`joint`, :code:`marginal`, :code:`input`.\n\n If 'input', rely on the branch lengths in the input tree and skip directly\n to the maximum-likelihood ancestral sequence reconstruction.\n Otherwise, perform preliminary sequence reconstruction using parsimony\n algorithm and do branch length optimization\n\nvary_rate : bool or float, optional\n redo the time tree estimation for rates +/- one standard deviation.\n if a float is passed, it is interpreted as standard deviation,\n otherwise this standard deviation is estimated from the root-to-tip regression\n\nuse_covariation : bool, optional\n default False, if False, rate estimates will be performed using simple\n regression ignoring phylogenetic covaration between nodes.\n\n**kwargs\n Keyword arguments needed by the downstream functions\n\n\nReturns\n-------\nTreeTime error/succces code : str\n return value depending on success or error", "id": "f2319:c0:m1"} {"signature": "def _find_best_root(self, covariation=True, force_positive=True, slope=, **kwarks):", "body": "for n in self.tree.find_clades():n.branch_length=n.mutation_lengthself.logger(\"\",)Treg = self.setup_TreeRegression(covariation=covariation)return Treg.optimal_reroot(force_positive=force_positive, slope=slope)['']", "docstring": "Determine the node that, when the tree is rooted on this node, results\nin the best regression of temporal constraints and root to tip distances.\n\nParameters\n----------\n\n infer_gtr : bool\n If True, infer new GTR model after re-root\n\n covariation : bool\n account for covariation structure when rerooting the tree\n\n force_positive : bool\n only accept positive evolutionary rate estimates when rerooting the tree", "id": "f2319:c0:m11"} {"signature": "def plot_vs_years(tt, step = None, ax=None, confidence=None, ticks=True, **kwargs):", "body": "import matplotlib.pyplot as plttt.branch_length_to_years()nleafs = tt.tree.count_terminals()if ax is None:fig = plt.figure(figsize=(,))ax = plt.subplot()else:fig = Noneif \"\" not in kwargs:kwargs[\"\"] = lambda x:x.name if (x.is_terminal() and nleafs<) else \"\"Phylo.draw(tt.tree, axes=ax, **kwargs)offset = tt.tree.root.numdate - tt.tree.root.branch_lengthdate_range = np.max([n.numdate for n in tt.tree.get_terminals()])-offsetif step is None or (step> and date_range/step>):step = **np.floor(np.log10(date_range))if date_range/step<:step/=elif date_range/step<:step/=step = max(/,step)if step:dtick = stepmin_tick = step*(offset//step)extra = dtick if dticktick_vals = np.arange(min_tick, min_tick+date_range+extra, dtick)xticks = tick_vals - offsetelse:xticks = ax.get_xticks()dtick = xticks[]-xticks[]shift = offset - dtick*(offset//dtick)xticks -= shifttick_vals = [x+offset-shift for x in xticks]ax.set_xticks(xticks)ax.set_xticklabels(map(str, tick_vals))ax.set_xlabel('')ax.set_ylabel('')ax.set_xlim((,date_range))if step:ylim = ax.get_ylim()xlim = ax.get_xlim()from matplotlib.patches import Rectanglefor yi,year in enumerate(np.arange(np.floor(tick_vals[]), tick_vals[-]+, step)):pos = year - offsetr = Rectangle((pos, ylim[]-),step, ylim[]-ylim[]+,facecolor=[+*(+yi%)] * ,edgecolor=[,,])ax.add_patch(r)if year in tick_vals and pos>=xlim[] and pos<=xlim[] and ticks:label_str = str(step*(year//step)) if step< else 
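The run() record above is essentially a driver whose behaviour is governed by the keyword arguments documented in its docstring. The following is a hypothetical usage sketch only: `tt` is assumed to be an already constructed TreeTime instance with a tree, alignment and sampling dates attached, the option values for branch_length_mode and Tc are taken from the docstring where it lists them, and the rooting method name is an assumption.

# Hypothetical usage; 'tt' is an assumed, already constructed TreeTime object.
success = tt.run(
    root='least-squares',        # assumed rooting method name; see reroot()
    infer_gtr=True,              # re-estimate the substitution model
    branch_length_mode='joint',  # 'joint', 'marginal' or 'input'
    Tc='opt',                    # coalescent scale: 'opt', 'const', 'skyline' or a float
    n_iqd=3,                     # clock-filter tips > 3 interquantile intervals off
    resolve_polytomies=True,
    max_iter=2,
    time_marginal=True,          # final marginal reconstruction of node times
)
# run() returns a success/error code (ttconf constants) per the docstring above.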
str(int(year))ax.text(pos,ylim[]-*(ylim[]-ylim[]), label_str,horizontalalignment='')ax.set_axis_off()if confidence:tree_layout(tt.tree)if not hasattr(tt.tree.root, \"\"):print(\"\")return ttconf.ERRORelif type(confidence) is float:cfunc = tt.get_max_posterior_regionelif len(confidence)==:cfunc = tt.get_confidence_intervalelse:print(\"\")return ttconf.ERRORfor n in tt.tree.find_clades():pos = cfunc(n, confidence)ax.plot(pos-offset, np.ones(len(pos))*n.ypos, lw=, c=(,,))return fig, ax", "docstring": "Converts branch length to years and plots the time tree on a time axis.\n\nParameters\n----------\n tt : TreeTime object\n A TreeTime instance after a time tree is inferred\n\n step : int\n Width of shaded boxes indicating blocks of years. Will be inferred if not specified.\n To switch off drawing of boxes, set to 0\n\n ax : matplotlib axes\n Axes to be used to plot, will create new axis if None\n\n confidence : tuple, float\n Draw confidence intervals. This assumes that marginal time tree inference was run.\n Confidence intervals are either specified as an interval of the posterior distribution\n like (0.05, 0.95) or as the weight of the maximal posterior region , e.g. 0.9\n\n **kwargs : dict\n Key word arguments that are passed down to Phylo.draw", "id": "f2319:m0"} {"signature": "def _set_branch_length_mode(self, branch_length_mode):", "body": "if branch_length_mode in ['', '', '']:self.branch_length_mode = branch_length_modeelif self.aln:bl_dis = [n.branch_length for n in self.tree.find_clades() if n.up]max_bl = np.max(bl_dis)if max_bl>:bl_mode = ''else:bl_mode = ''self.logger(\"\"%(max_bl, bl_mode),)self.branch_length_mode = bl_modeelse:self.branch_length_mode = ''", "docstring": "if branch_length mode is not explicitly set, set according to\nempirical branch length distribution in input tree\n\nParameters\n----------\n\n branch_length_mode : str, 'input', 'joint', 'marginal'\n if the maximal branch length in the tree is longer than 0.05, this will\n default to 'input'. 
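plot_vs_years above picks the width of the shaded year boxes by rounding the date range down to a power of ten and then refining; the stored body has its numeric literals stripped, so the following is a reconstruction of that kind of heuristic rather than the exact code:

import numpy as np

def nice_step(date_range):
    # Start from the largest power of ten not exceeding the range, then halve
    # until the range spans at least a couple of boxes; never go below 1/12 year.
    step = 10 ** np.floor(np.log10(date_range))
    while date_range / step < 2:
        step /= 2
    return max(1.0 / 12, step)

# e.g. nice_step(35.0) -> 10.0, nice_step(1.2) -> 0.5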
Otherwise set to 'joint'", "id": "f2319:c0:m2"} {"signature": "def setMaxDemandResetNow(self, password=\"\"):", "body": "result = Falseself.setContext(\"\")try:if len(password) != :self.writeCmdMsg(\"\")self.setContext(\"\")return resultif not self.request(False):self.writeCmdMsg(\"\")else:if not self.serialCmdPwdAuth(password):self.writeCmdMsg(\"\")else:req_str = \"\" + binascii.hexlify(str().zfill()) + \"\"req_str += self.calc_crc16(req_str[:].decode(\"\"))self.m_serial_port.write(req_str.decode(\"\"))if self.m_serial_port.getResponse(self.getContext()).encode(\"\") == \"\":self.writeCmdMsg(\"\")result = Trueself.serialPostEnd()except:ekm_log(traceback.format_exc(sys.exc_info()))self.setContext(\"\")return result", "docstring": "Serial call zero max demand (Dash Now button)\n\n Args:\n password (str): Optional password\n\n Returns:\n bool: True on completion with ACK.", "id": "f2325:c29:m28"} {"signature": "def extractHolidayWeekendSchedules(self):", "body": "result = namedtuple(\"\", [\"\", \"\"])result.Weekend = self.m_hldy[\"\"][MeterData.StringValue]result.Holiday = self.m_hldy[\"\"][MeterData.StringValue]return result", "docstring": "extract holiday and weekend :class:`~ekmmeters.Schedule` from meter object buffer.\n\n Returns:\n tuple: Holiday and weekend :class:`~ekmmeters.Schedule` values, as strings.\n\n ======= ======================================\n Holiday :class:`~ekmmeters.Schedule` as string\n Weekend :class:`~ekmmeters.Schedule` as string\n ======= ======================================", "id": "f2325:c29:m44"} {"signature": "def initPort(self):", "body": "try:self.m_ser = serial.Serial(port=self.m_ttyport,baudrate=self.m_baudrate,timeout=,parity=serial.PARITY_EVEN,stopbits=serial.STOPBITS_ONE,bytesize=serial.SEVENBITS,rtscts=False)ekm_log(\"\" + serial.VERSION)ekm_log(\"\" + self.m_ttyport)ekm_log(\"\" + str(self.m_baudrate))time.sleep(self.m_init_wait)return Trueexcept:ekm_log(traceback.format_exc(sys.exc_info()))return False", "docstring": "Required initialization call, wraps pyserial constructor.", "id": "f2325:c26:m1"} {"signature": "def setConnectString(self, connection_string):", "body": "self.m_connection_string = connection_stringpass", "docstring": "Setter for connection string.\n Args:\n connection_string (str): Connection string.", "id": "f2325:c27:m1"} {"signature": "def getReadBuffer(self):", "body": "ekm_log(\"\")empty = SerialBlock()return empty", "docstring": "Required override to fetch the read serial block.\n\n Returns:\n SerialBlock: Every supported field (A or A+B, includes all fields)", "id": "f2325:c29:m2"} {"signature": "def dict_factory(self, cursor, row):", "body": "d = {}for idx, col in enumerate(cursor.description):val = row[idx]name = col[]if name == Field.Time_Stamp:d[col[]] = str(val)continueif name == \"\" or name == \"\": continueif name not in self.m_all_fields:continueif (str(val) != \"\") and ((val > ) or (val < )):d[name] = str(val)return d", "docstring": "Sqlite callback accepting the cursor and the original row as a tuple.\n\n Simple return of JSON safe types.\n\n Args:\n cursor (sqlite cursor): Original cursory\n row (sqlite row tuple): Original row.\n\n Returns:\n dict: modified row.", "id": "f2325:c28:m2"} {"signature": "def extractMonthTariff(self, month):", "body": "ret = namedtuple(\"\", [\"\", Field.kWh_Tariff_1, Field.kWh_Tariff_2, Field.kWh_Tariff_3,Field.kWh_Tariff_4, Field.kWh_Tot, Field.Rev_kWh_Tariff_1,Field.Rev_kWh_Tariff_2, Field.Rev_kWh_Tariff_3,Field.Rev_kWh_Tariff_4, Field.Rev_kWh_Tot])month += ret.Month = 
str(month)if (month < ) or (month > Extents.Months):ret.kWh_Tariff_1 = ret.kWh_Tariff_2 = ret.kWh_Tariff_3 = ret.kWh_Tariff_4 = str()ret.Rev_kWh_Tariff_1 = ret.Rev_kWh_Tariff_2 = ret.Rev_kWh_Tariff_3 = ret.Rev_kWh_Tariff_4 = str()ret.kWh_Tot = ret.Rev_kWh_Tot = str()ekm_log(\"\" + str(month))return retbase_str = \"\" + str(month) + \"\"ret.kWh_Tariff_1 = self.m_mons[base_str + \"\"][MeterData.StringValue]ret.kWh_Tariff_2 = self.m_mons[base_str + \"\"][MeterData.StringValue]ret.kWh_Tariff_3 = self.m_mons[base_str + \"\"][MeterData.StringValue]ret.kWh_Tariff_4 = self.m_mons[base_str + \"\"][MeterData.StringValue]ret.kWh_Tot = self.m_mons[base_str + \"\"][MeterData.StringValue]ret.Rev_kWh_Tariff_1 = self.m_rev_mons[base_str + \"\"][MeterData.StringValue]ret.Rev_kWh_Tariff_2 = self.m_rev_mons[base_str + \"\"][MeterData.StringValue]ret.Rev_kWh_Tariff_3 = self.m_rev_mons[base_str + \"\"][MeterData.StringValue]ret.Rev_kWh_Tariff_4 = self.m_rev_mons[base_str + \"\"][MeterData.StringValue]ret.Rev_kWh_Tot = self.m_rev_mons[base_str + \"\"][MeterData.StringValue]return ret", "docstring": "Extract the tariff for a single month from the meter object buffer.\n\n Args:\n month (int): A :class:`~ekmmeters.Months` value or range(Extents.Months).\n\n Returns:\n tuple: The eight tariff period totals for month. The return tuple breaks out as follows:\n\n ================= ======================================\n kWh_Tariff_1 kWh for tariff period 1 over month.\n kWh_Tariff_2 kWh for tariff period 2 over month\n kWh_Tariff_3 kWh for tariff period 3 over month\n kWh_Tariff_4 kWh for tariff period 4 over month\n kWh_Tot Total kWh over requested month\n Rev_kWh_Tariff_1 Rev kWh for tariff period 1 over month\n Rev_kWh_Tariff_3 Rev kWh for tariff period 2 over month\n Rev_kWh_Tariff_3 Rev kWh for tariff period 3 over month\n Rev_kWh_Tariff_4 Rev kWh for tariff period 4 over month\n Rev_kWh_Tot Total Rev kWh over requested month\n ================= ======================================", "id": "f2325:c29:m41"} {"signature": "def getMeterAddress(self):", "body": "return self.m_meter_address", "docstring": "Getter for meter object 12 character address.\n\n Returns:\n str: 12 character address on front of meter", "id": "f2325:c29:m17"} {"signature": "def getField(self, fld_name):", "body": "result = \"\"if fld_name in self.m_req:result = self.m_req[fld_name][MeterData.StringValue]else:ekm_log(\"\" + fld_name)return result", "docstring": "Return :class:`~ekmmeters.Field` content, scaled and formatted.\n\n Args:\n fld_name (str): A `:class:~ekmmeters.Field` value which is on your meter.\n\n Returns:\n str: String value (scaled if numeric) for the field.", "id": "f2325:c33:m10"} {"signature": "def insert(self, meter_db):", "body": "if meter_db:meter_db.dbInsert(self.m_req, self.m_raw_read_a, self.m_raw_read_b)else:ekm_log(\"\")pass", "docstring": "Insert to :class:`~ekmmeters.MeterDB` subclass.\n\n Please note MeterDB subclassing is only for simplest-case.\n\n Args:\n meter_db (MeterDB): Instance of subclass of MeterDB.", "id": "f2325:c32:m6"} {"signature": "def requestB(self):", "body": "work_context = self.getContext()self.setContext(\"\")self.m_serial_port.write(\"\".decode(\"\") + self.m_meter_address + \"\".decode(\"\"))self.m_raw_read_b = self.m_serial_port.getResponse(self.getContext())unpacked_read_b = self.unpackStruct(self.m_raw_read_b, self.m_blk_b)self.convertData(unpacked_read_b, self.m_blk_b, self.m_kwh_precision)self.m_b_crc = self.crcMeterRead(self.m_raw_read_b, 
self.m_blk_b)self.setContext(work_context)return self.m_b_crc", "docstring": "Issue a B read on V4 meter.\n\n Returns:\n bool: True if CRC match at end of call.", "id": "f2325:c33:m7"} {"signature": "def writeCmdMsg(self, msg):", "body": "ekm_log(\"\" + self.getContext() + \"\" + msg)self.m_command_msg = msg", "docstring": "Internal method to set the command result string.\n\n Args:\n msg (str): Message built during command.", "id": "f2325:c29:m46"} {"signature": "def fillCreate(self, qry_str):", "body": "count = for fld in self.m_all_fields:fld_type = self.m_all_fields[fld][MeterData.TypeValue]fld_len = self.m_all_fields[fld][MeterData.SizeValue]qry_spec = self.mapTypeToSql(fld_type, fld_len)if count > :qry_str += \"\"qry_str = qry_str + '' + fld + '' + qry_speccount += qry_str += (\"\" + Field.Time_Stamp + \"\" +\"\" +\"\")return qry_str", "docstring": "Return query portion below CREATE.\n Args:\n qry_str (str): String as built.\n\n Returns:\n string: Passed string with fields appended.", "id": "f2325:c27:m4"} {"signature": "def registerObserver(self, observer):", "body": "self.m_observers.append(observer)pass", "docstring": "Place an observer in the meter update() chain.\n\n Args:\n observer (MeterObserver): Subclassed MeterObserver.", "id": "f2325:c29:m18"} {"signature": "def getName(self):", "body": "return self.m_ttyport", "docstring": "Getter for serial port name\n\n Returns:\n string: name of serial port (ex: 'COM3', '/dev/ttyS0')", "id": "f2325:c26:m2"} {"signature": "def getSchedulesBuffer(self, period_group):", "body": "empty_return = SerialBlock()if period_group == ReadSchedules.Schedules_1_To_4:return self.m_schd_1_to_4elif period_group == ReadSchedules.Schedules_5_To_6:return self.m_schd_5_to_6else:return empty_return", "docstring": "Return the requested tariff schedule :class:`~ekmmeters.SerialBlock` for meter.\n\n Args:\n period_group (int): A :class:`~ekmmeters.ReadSchedules` value.\n\n Returns:\n SerialBlock: The requested tariff schedules for meter.", "id": "f2325:c29:m22"} {"signature": "def setContext(self, context_str):", "body": "if (len(self.m_context) == ) and (len(context_str) >= ):if context_str[:] != \"\":ekm_log(\"\" + context_str)self.m_context = context_str", "docstring": "Set context string for serial command. Private setter.\n\n Args:\n context_str (str): Command specific string.", "id": "f2325:c29:m5"} {"signature": "def serialCmdPwdAuth(self, password_str):", "body": "result = Falsetry:req_start = \"\" + binascii.hexlify(password_str) + \"\"req_crc = self.calc_crc16(req_start[:].decode(\"\"))req_str = req_start + req_crcself.m_serial_port.write(req_str.decode(\"\"))if self.m_serial_port.getResponse(self.getContext()).encode(\"\") == \"\":ekm_log(\"\" + self.getContext() + \"\")result = Trueelse:ekm_log(\"\" + self.getContext() + \"\")except:ekm_log(\"\" + self.getContext() + \"\")ekm_log(traceback.format_exc(sys.exc_info()))return result", "docstring": "Password step of set commands\n\n This method is normally called within another serial command, so it\n does not issue a termination string. 
Any default password is set\n in the caller parameter list, never here.\n\n Args:\n password_str (str): Required password.\n\n Returns:\n bool: True on completion and ACK.", "id": "f2325:c29:m49"} {"signature": "def attachPort(self, serial_port):", "body": "self.m_serial_port = serial_portpass", "docstring": "Attach required :class:`~ekmmeters.SerialPort`.\n\n Args:\n serial_port (SerialPort): Serial port object, does not need to be initialized.", "id": "f2325:c32:m1"} {"signature": "def setZeroResettableKWH(self, password=\"\"):", "body": "result = Falseself.setContext(\"\")try:if not self.requestA():self.writeCmdMsg(\"\")else:if not self.serialCmdPwdAuth(password):self.writeCmdMsg(\"\")else:req_str = \"\"req_str += self.calc_crc16(req_str[:].decode(\"\"))self.m_serial_port.write(req_str.decode(\"\"))if self.m_serial_port.getResponse(self.getContext()).encode(\"\") == \"\":self.writeCmdMsg(\"\")result = Trueself.serialPostEnd()except:ekm_log(traceback.format_exc(sys.exc_info()))self.setContext(\"\")return result", "docstring": "Serial call to zero resettable kWh registers.\n\n Args:\n password (str): Optional password.\n\n Returns:\n bool: True on completion and ACK.", "id": "f2325:c33:m19"} {"signature": "def write(self, output):", "body": "view_str = output.encode('', '')if (len(view_str) > ):self.m_ser.write(view_str)self.m_ser.flush()self.m_ser.reset_input_buffer()time.sleep(self.m_force_wait)pass", "docstring": "Passthrough for pyserial Serial.write().\n\n Args:\n output (str): Block to write to port", "id": "f2325:c26:m4"} {"signature": "def attachPort(self, serial_port):", "body": "self.m_serial_port = serial_portpass", "docstring": "Required override to attach the port to the meter.\n\n Args:\n serial_port (SerialPort): Declared serial port. 
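Several of the ekmmeters records above (setMaxDemandResetNow, serialCmdPwdAuth, setZeroResettableKWH) follow the same wire pattern: build a request string, append a CRC-16 over the payload, write the frame to the port, and treat a single ACK byte as success. The stored bodies have their literals stripped, so the sketch below is generic: the CRC variant shown (init 0xFFFF, reflected polynomial 0xA001, Modbus-style) is an assumption and may not match the meter's actual checksum, the ACK byte value is hypothetical, and `port` is any pyserial-like object.

def crc16(data, poly=0xA001):
    # Bit-by-bit CRC-16 over a bytes payload; returns four lowercase hex digits.
    crc = 0xFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ poly if crc & 1 else crc >> 1
    return format(crc, '04x')

def send_command(port, payload, ack=b'\x06'):
    # Frame the payload with its CRC, write it, and check for an ACK byte.
    frame = payload + crc16(payload).encode('ascii')
    port.write(frame)
    return port.read(1) == ack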
Does not need to be initialized.", "id": "f2325:c33:m1"} {"signature": "def __init__(self, interval):", "body": "super(IntervalObserver, self).__init__()self.m_interval = intervalself.m_summary = SerialBlock()pass", "docstring": "Args:\n interval (int): Interval to summarize", "id": "f2325:c31:m0"} {"signature": "def __init__(self, ttyport, baudrate=, force_wait = ):", "body": "self.m_ttyport = ttyportself.m_baudrate = baudrateself.m_ser = Noneself.m_fd = Noneself.m_max_waits = self.m_wait_sleep = self.m_force_wait = force_waitself.m_init_wait = pass", "docstring": "Args:\n ttyport (str): port name, ex 'COM3' '/dev/ttyUSB0'\n baudrate (int): optional, 9600 default and recommended\n force_wait(float) : optional post commnd sleep, if required", "id": "f2325:c26:m0"} {"signature": "def serialPostEnd(self):", "body": "ekm_log(\"\" + self.m_context + \"\")self.m_serial_port.write(\"\".decode(\"\"))pass", "docstring": "Post termination code to implicitly current meter.", "id": "f2325:c32:m10"} {"signature": "def __init__(self, connection_string):", "body": "self.m_connection_string = connection_stringself.m_all_fields = SerialBlock()self.combineAB()pass", "docstring": "Args:\n connection_string (str): database appropriate connection string", "id": "f2325:c27:m0"} {"signature": "def initWorkFormat(self):", "body": "self.m_blk_a[\"\"] = [, FieldType.Hex, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Model] = [, FieldType.Hex, ScaleType.No, \"\", , False, True]self.m_blk_a[Field.Firmware] = [, FieldType.Hex, ScaleType.No, \"\", , False, True]self.m_blk_a[Field.Meter_Address] = [, FieldType.String, ScaleType.No, \"\", , False, True]self.m_blk_a[Field.kWh_Tot] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.kWh_Tariff_1] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.kWh_Tariff_2] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.kWh_Tariff_3] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.kWh_Tariff_4] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.Rev_kWh_Tot] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.Rev_kWh_Tariff_1] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.Rev_kWh_Tariff_2] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.Rev_kWh_Tariff_3] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.Rev_kWh_Tariff_4] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_blk_a[Field.RMS_Volts_Ln_1] = [, FieldType.Float, ScaleType.Div10, \"\", , False, False]self.m_blk_a[Field.RMS_Volts_Ln_2] = [, FieldType.Float, ScaleType.Div10, \"\", , False, False]self.m_blk_a[Field.RMS_Volts_Ln_3] = [, FieldType.Float, ScaleType.Div10, \"\", , False, False]self.m_blk_a[Field.Amps_Ln_1] = [, FieldType.Float, ScaleType.Div10, \"\", , False, False]self.m_blk_a[Field.Amps_Ln_2] = [, FieldType.Float, ScaleType.Div10, \"\", , False, False]self.m_blk_a[Field.Amps_Ln_3] = [, FieldType.Float, ScaleType.Div10, \"\", , False, False]self.m_blk_a[Field.RMS_Watts_Ln_1] = [, FieldType.Int, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.RMS_Watts_Ln_2] = [, FieldType.Int, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.RMS_Watts_Ln_3] = [, FieldType.Int, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.RMS_Watts_Tot] = [, FieldType.Int, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Cos_Theta_Ln_1] = [, FieldType.PowerFactor, 
ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Cos_Theta_Ln_2] = [, FieldType.PowerFactor, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Cos_Theta_Ln_3] = [, FieldType.PowerFactor, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Max_Demand] = [, FieldType.Float, ScaleType.KWH, \"\", , False, True]self.m_blk_a[Field.Max_Demand_Period] = [, FieldType.Int, ScaleType.No, \"\", , False, True]self.m_blk_a[Field.Meter_Time] = [, FieldType.String, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.CT_Ratio] = [, FieldType.Int, ScaleType.No, \"\", , False, True]self.m_blk_a[Field.Pulse_Cnt_1] = [, FieldType.Int, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Pulse_Cnt_2] = [, FieldType.Int, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Pulse_Cnt_3] = [, FieldType.Int, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Pulse_Ratio_1] = [, FieldType.Int, ScaleType.No, \"\", , False, True]self.m_blk_a[Field.Pulse_Ratio_2] = [, FieldType.Int, ScaleType.No, \"\", , False, True]self.m_blk_a[Field.Pulse_Ratio_3] = [, FieldType.Int, ScaleType.No, \"\", , False, True]self.m_blk_a[Field.State_Inputs] = [, FieldType.Int, ScaleType.No, \"\", , False, True]self.m_blk_a[\"\"] = [, FieldType.Hex, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Status_A] = [, FieldType.Hex, ScaleType.No, \"\", , False, False]self.m_blk_a[\"\"] = [, FieldType.Hex, ScaleType.No, \"\", , False, False]self.m_blk_a[\"\"] = [, FieldType.Hex, ScaleType.No, \"\", , False, False]self.m_blk_a[Field.Power_Factor_Ln_1] = [, FieldType.Int, ScaleType.No, \"\", , True, False]self.m_blk_a[Field.Power_Factor_Ln_2] = [, FieldType.Int, ScaleType.No, \"\", , True, False]self.m_blk_a[Field.Power_Factor_Ln_3] = [, FieldType.Int, ScaleType.No, \"\", , True, False]", "docstring": "Initialize :class:`~ekmmeters.SerialBlock` for V3 read.", "id": "f2325:c32:m2"} {"signature": "def initRevMons(self):", "body": "self.m_rev_mons[\"\"] = [, FieldType.Hex, ScaleType.No, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, 
FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Float, ScaleType.KWH, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Hex, ScaleType.No, \"\", , False, False]self.m_rev_mons[\"\"] = [, FieldType.Hex, ScaleType.No, \"\", , False, False]pass", "docstring": "Initialize second (and last) month tarifff :class:`~ekmmeters.SerialBlock` for meter.", "id": "f2325:c29:m26"} {"signature": "def jsonRender(self, def_buf):", "body": "try:ret_dict = SerialBlock()ret_dict[Field.Meter_Address] = self.getMeterAddress()for fld in def_buf:compare_fld = fld.upper()if not \"\" in compare_fld and not \"\" in compare_fld:ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]except:ekm_log(traceback.format_exc(sys.exc_info()))return \"\"return json.dumps(ret_dict, indent=)", "docstring": "Translate the passed serial block into string only JSON.\n\n Args:\n def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object.\n\n Returns:\n str: JSON rendering of meter record.", "id": "f2325:c29:m14"} {"signature": "def assignSeasonSchedule(self, season, month, day, schedule):", "body": "season += schedule += if ((season < ) or (season > Extents.Seasons) or (schedule < ) or(schedule > Extents.Schedules) or (month > ) or (month < ) or(day < ) or (day > )):ekm_log(\"\" + str(month) + \"\" + str(day) +\"\" + str(schedule) + \"\" + str(season))return Falseidx_mon = \"\" + str(season) + \"\"idx_day = \"\" + str(season) + \"\"idx_schedule = \"\" + str(season) + \"\"if idx_mon not in self.m_seasons_sched_params:ekm_log(\"\" + idx_mon)return Falseif idx_day not in self.m_seasons_sched_params:ekm_log(\"\" + idx_day)return Falseif idx_schedule not in self.m_seasons_sched_params:ekm_log(\"\" + idx_schedule)return Falseself.m_seasons_sched_params[idx_mon] = monthself.m_seasons_sched_params[idx_day] = dayself.m_seasons_sched_params[idx_schedule] = schedulereturn True", "docstring": "Define a single season and assign a schedule\n\n Args:\n season (int): A :class:`~ekmmeters.Seasons` value or in range(Extent.Seasons).\n month (int): Month 1-12.\n day (int): Day 1-31.\n schedule (int): A :class:`~ekmmeters.LCDItems` value or in range(Extent.Schedules).\n\n Returns:\n bool: True on completion and ACK.", "id": "f2325:c29:m33"} {"signature": "def readSchedules(self, tableset):", "body": "self.setContext(\"\")try:req_table = binascii.hexlify(str(tableset).zfill())req_str = \"\" + req_table + \"\"self.request(False)req_crc = self.calc_crc16(req_str[:].decode(\"\"))req_str += req_crcself.m_serial_port.write(req_str.decode(\"\"))raw_ret = self.m_serial_port.getResponse(self.getContext())self.serialPostEnd()return_crc = self.calc_crc16(raw_ret[:-])if tableset == ReadSchedules.Schedules_1_To_4:unpacked_read = self.unpackStruct(raw_ret, 
self.m_schd_1_to_4)self.convertData(unpacked_read, self.m_schd_1_to_4, self.m_kwh_precision)if str(return_crc) == str(self.m_schd_1_to_4[\"\"][MeterData.StringValue]):ekm_log(\"\")self.setContext(\"\")return Trueelif tableset == ReadSchedules.Schedules_5_To_6:unpacked_read = self.unpackStruct(raw_ret, self.m_schd_5_to_6)self.convertData(unpacked_read, self.m_schd_5_to_6, self.m_kwh_precision)if str(return_crc) == str(self.m_schd_5_to_6[\"\"][MeterData.StringValue]):ekm_log(\"\")self.setContext(\"\")return Trueexcept:ekm_log(traceback.format_exc(sys.exc_info()))self.setContext(\"\")return False", "docstring": "Serial call to read schedule tariffs buffer\n\n Args:\n tableset (int): :class:`~ekmmeters.ReadSchedules` buffer to return.\n\n Returns:\n bool: True on completion and ACK.", "id": "f2325:c29:m38"} {"signature": "def getReadBuffer(self):", "body": "return self.m_req", "docstring": "Return :class:`~ekmmeters.SerialBlock` for last read.\n\n Appropriate for conversion to JSON or other extraction.\n\n Returns:\n SerialBlock: A read.", "id": "f2325:c32:m5"} {"signature": "def setPulseInputRatio(self, line_in, new_cnst, password=\"\"):", "body": "result = Falseself.setContext(\"\")try:if not self.requestA():self.writeCmdMsg(\"\")else:if not self.serialCmdPwdAuth(password):self.writeCmdMsg(\"\")else:req_const = binascii.hexlify(str(new_cnst).zfill())line_const = binascii.hexlify(str(line_in - ))req_str = \"\" + line_const + \"\" + req_const + \"\"req_str += self.calc_crc16(req_str[:].decode(\"\"))self.m_serial_port.write(req_str.decode(\"\"))if self.m_serial_port.getResponse(self.getContext()).encode(\"\") == \"\":self.writeCmdMsg(\"\")result = Trueself.serialPostEnd()except:ekm_log(traceback.format_exc(sys.exc_info()))self.setContext(\"\")return result", "docstring": "Serial call to set pulse input ratio on a line.\n\n Args:\n line_in (int): Member of :class:`~ekmmeters.Pulse`\n new_cnst (int): New pulse input ratio\n password (str): Optional password\n\n Returns:", "id": "f2325:c33:m18"} {"signature": "def readMonthTariffs(self, months_type):", "body": "self.setContext(\"\")try:req_type = binascii.hexlify(str(months_type).zfill())req_str = \"\" + req_type + \"\"work_table = self.m_monsif months_type == ReadMonths.kWhReverse:work_table = self.m_rev_monsself.request(False)req_crc = self.calc_crc16(req_str[:].decode(\"\"))req_str += req_crcself.m_serial_port.write(req_str.decode(\"\"))raw_ret = self.m_serial_port.getResponse(self.getContext())self.serialPostEnd()unpacked_read = self.unpackStruct(raw_ret, work_table)self.convertData(unpacked_read, work_table, self.m_kwh_precision)return_crc = self.calc_crc16(raw_ret[:-])if str(return_crc) == str(work_table[\"\"][MeterData.StringValue]):ekm_log(\"\" + str(req_type))self.setContext(\"\")return Trueexcept:ekm_log(traceback.format_exc(sys.exc_info()))self.setContext(\"\")return False", "docstring": "Serial call to read month tariffs block into meter object buffer.\n\n Args:\n months_type (int): A :class:`~ekmmeters.ReadMonths` value.\n\n Returns:\n bool: True on completion.", "id": "f2325:c29:m40"} {"signature": "def makeReturnFormat(self):", "body": "for fld in self.m_blk_a:compare_fld = fld.upper()if not \"\" in compare_fld and not \"\" in compare_fld:self.m_req[fld] = self.m_blk_a[fld]pass", "docstring": "Strip reserved and CRC for m_req :class:`~ekmmeters.SerialBlock`.", "id": "f2325:c32:m4"} {"signature": "def setRelay(self, seconds, relay, status, password=\"\"):", "body": "result = Falseself.setContext(\"\")try:self.clearCmdMsg()if 
len(password) != :self.writeCmdMsg(\"\")self.setContext(\"\")return resultif seconds < or seconds > :self.writeCmdMsg(\"\")self.setContext(\"\")return resultif not self.requestA():self.writeCmdMsg(\"\")else:if not self.serialCmdPwdAuth(password):self.writeCmdMsg(\"\")else:req_str = \"\"req_str = (\"\" +binascii.hexlify(str(relay)).zfill() +\"\" +binascii.hexlify(str(status)).zfill() +binascii.hexlify(str(seconds).zfill()) + \"\")req_str += self.calc_crc16(req_str[:].decode(\"\"))self.m_serial_port.write(req_str.decode(\"\"))if self.m_serial_port.getResponse(self.getContext()).encode(\"\") == \"\":self.writeCmdMsg(\"\")result = Trueself.serialPostEnd()except:ekm_log(traceback.format_exc(sys.exc_info()))self.setContext(\"\")return result", "docstring": "Serial call to set relay.\n\n Args:\n seconds (int): Seconds to hold, ero is hold forever. See :class:`~ekmmeters.RelayInterval`.\n relay (int): Selected relay, see :class:`~ekmmeters.Relay`.\n status (int): Status to set, see :class:`~ekmmeters.RelayState`\n password (str): Optional password\n\n Returns:\n bool: True on completion and ACK.", "id": "f2325:c33:m16"} {"signature": "def setMeterPassword(self, new_pwd, pwd=\"\"):", "body": "result = Falseself.setContext(\"\")try:if len(new_pwd) != or len(pwd) != :self.writeCmdMsg(\"\")self.setContext(\"\")return resultif not self.request(False):self.writeCmdMsg(\"\")else:if not self.serialCmdPwdAuth(pwd):self.writeCmdMsg(\"\")else:req_pwd = binascii.hexlify(new_pwd.zfill())req_str = \"\" + req_pwd + \"\"req_str += self.calc_crc16(req_str[:].decode(\"\"))self.m_serial_port.write(req_str.decode(\"\"))if self.m_serial_port.getResponse(self.getContext()).encode(\"\") == \"\":self.writeCmdMsg(\"\")result = Trueself.serialPostEnd()except:ekm_log(traceback.format_exc(sys.exc_info()))self.setContext(\"\")return result", "docstring": "Serial Call to set meter password. 
USE WITH CAUTION.\n\n Args:\n new_pwd (str): 8 digit numeric password to set\n pwd (str): Old 8 digit numeric password.\n\n Returns:\n bool: True on completion with ACK.", "id": "f2325:c29:m11"} {"signature": "def addLcdItem(self, lcd_item_no):", "body": "self.m_lcd_items.append(lcd_item_no)pass", "docstring": "Simple append to internal buffer.\n\nUsed with :func:`~ekmmeters.V4Meter.setLcd` and :func:`~ekmmeters.V4Meter.initLcd`\n\nArgs:\n lcd_item_no (int): Member of :class:`~ekmmeters.LCDItems`", "id": "f2325:c33:m22"} {"signature": "@staticmethoddef _insert_img(qr_img, icon_img=None, factor=, icon_box=None, static_dir=None):", "body": "img_w, img_h = qr_img.sizesize_w = int(img_w) / int(factor)size_h = int(img_h) / int(factor)try:icon_fp = os.path.join(icon_img)if static_dir:icon_fp = os.path.join(static_dir, icon_img)if icon_img.split(\"\")[] in [\"\", \"\", \"\"]:icon_fp = BytesIO(urlopen(icon_img).read()) icon = Image.open(icon_fp)except:return qr_imgicon_w, icon_h = icon.sizeicon_w = size_w if icon_w > size_w else icon_wicon_h = size_h if icon_h > size_h else icon_hicon = icon.resize((int(icon_w), int(icon_h)), Image.ANTIALIAS)icon = icon.convert(\"\")left = int((img_w - icon_w) / )top = int((img_h - icon_h) / )icon_box = (int(icon_box[]), int(icon_box[])) if icon_box else (left, top)qr_img.paste(im=icon, box=icon_box, mask=icon)return qr_img", "docstring": "Inserts a small icon to QR Code image", "id": "f2352:c0:m5"} {"signature": "def upload(tags, file_path, username=None, api_key=GIPHY_PUBLIC_KEY,strict=False):", "body": "return Giphy(api_key=api_key, strict=strict).upload(tags, file_path, username)", "docstring": "Shorthand for creating a Giphy api wrapper with the given api key\nand then calling the upload method.", "id": "f2356:m7"} {"signature": "def trending_list(limit=DEFAULT_SEARCH_LIMIT, api_key=GIPHY_PUBLIC_KEY,strict=False, rating=None):", "body": "return Giphy(api_key=api_key, strict=strict).trending_list(limit=limit, rating=rating)", "docstring": "Shorthand for creating a Giphy api wrapper with the given api key\nand then calling the trending_list method.", "id": "f2356:m4"} {"signature": "def translate(self, term=None, phrase=None, strict=False, rating=None):", "body": "assert any((term, phrase)), ''if phrase:phrase = phrase.replace('', '')params = {'': (term or phrase)}if rating:params.update({'': rating})resp = self._fetch('', **params)if resp['']:return GiphyImage(resp[''])elif strict or self.strict:raise GiphyApiException(\"\" %(term or phrase))", "docstring": "Retrieve a single image that represents a transalation of a term or\nphrase into an animated gif. Punctuation is ignored. By default, this\nwill perform a `term` translation. 
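The _insert_img record above centres a resized icon over a QR code image with Pillow, but its stored body is stripped of literals. The following is a simplified sketch of the same idea; file paths, the size factor and the resampling filter are illustrative choices, not the original values.

from PIL import Image

def overlay_icon(qr_path, icon_path, out_path, factor=4):
    # Shrink the icon to at most 1/factor of the QR image and paste it centred,
    # using the icon's own alpha channel as the paste mask.
    qr = Image.open(qr_path).convert('RGBA')
    icon = Image.open(icon_path).convert('RGBA')
    w, h = qr.size
    icon.thumbnail((w // factor, h // factor), Image.LANCZOS)
    box = ((w - icon.width) // 2, (h - icon.height) // 2)
    qr.paste(icon, box, mask=icon)
    qr.save(out_path)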
If you want to translate by phrase,\nuse the `phrase` keyword argument.\n\n:param term: Search term or terms\n:type term: string\n:param phrase: Search phrase\n:type phrase: string\n:param strict: Whether an exception should be raised when no results\n:type strict: boolean\n:param rating: limit results to those rated (y,g, pg, pg-13 or r).\n:type rating: string", "id": "f2356:c3:m6"} {"signature": "@propertydef media_url(self):", "body": "return self.original.url", "docstring": "The media URL of the gif at its original size", "id": "f2356:c2:m4"} {"signature": "def float_range(start=, stop=None, step=):", "body": "start = float(start)while start < stop:yield startstart += step", "docstring": "Much like the built-in function range, but accepts floats\n\n>>> tuple(float_range(0, 9, 1.5))\n(0.0, 1.5, 3.0, 4.5, 6.0, 7.5)", "id": "f2365:m2"} {"signature": "def flatten_mapping(mapping):", "body": "return {key: valuefor keys, value in mapping.items()for key in always_iterable(keys)}", "docstring": "For every key that has an __iter__ method, assign the values\nto a key for each.\n\n>>> flatten_mapping({'ab': 3, ('c','d'): 4}) == {'ab': 3, 'c': 4, 'd': 4}\nTrue", "id": "f2365:m1"} {"signature": "@classmethoddef lookup_relativedelta_parameter(cls, unit_string):", "body": "try:return cls._rdp_map()[unit_string.lower()]except KeyError:raise ValueError(\"\")", "docstring": ">>> lrp = Schedule.lookup_relativedelta_parameter\n>>> lrp('Years')\n'years'\n>>> lrp('yr')\n'years'\n>>> lrp('s')\n'seconds'", "id": "f2370:c1:m14"} {"signature": "def parse_css(self):", "body": "cssutils.ser.prefs.useMinified()pairs = ((r.selectorText, r.style.cssText)for r in self.get_stylesheet()if not isinstance(r, cssutils.css.CSSComment))return dict(pairs)", "docstring": "Take a .css file (classes only please) and parse it into a dictionary\nof class/style pairs.", "id": "f2371:c0:m37"} {"signature": "def draw_titles(self):", "body": "if self.show_graph_title:self.draw_graph_title()if self.show_graph_subtitle:self.draw_graph_subtitle()if self.show_x_title:self.draw_x_title()if self.show_y_title:self.draw_y_title()", "docstring": "Draws the graph title and subtitle", "id": "f2371:c0:m28"} {"signature": "def draw_y_labels(self):", "body": "if not self.show_y_labels:returnlabels = self.get_y_labels()count = len(labels)labels = enumerate(iter(labels))start = int(not self.step_include_first_y_label)labels = itertools.islice(labels, start, None, self.step_y_labels)list(map(self.draw_y_label, labels))self.draw_y_guidelines(self.field_height(), count)", "docstring": "Draw the Y axis labels", "id": "f2371:c0:m23"} {"signature": "def draw_x_guidelines(self, label_height, count):", "body": "if not self.show_x_guidelines:returnfor count in range(, count):move = ''.format(start=label_height * count,stop=self.graph_height,)path = {'': move, '': ''}etree.SubElement(self.graph, '', path)", "docstring": "Draw the X-axis guidelines", "id": "f2371:c0:m26"} {"signature": "def make_datapoint_text(self, x, y, value, style=None):", "body": "if not self.show_data_values:returne = etree.SubElement(self.foreground, '', {'': str(x),'': str(y),'': '','': '' % vars(),})e.text = str(value)e = etree.SubElement(self.foreground, '', {'': str(x),'': str(y),'': ''})e.text = str(value)if style:e.set('', style)", "docstring": "Add text for a datapoint", "id": "f2371:c0:m17"} {"signature": "def draw_graph(self):", "body": "transform = '' % (self.border_left, self.border_top)self.graph = etree.SubElement(self.root, '', 
transform=transform)etree.SubElement(self.graph, '', {'': '','': '','': str(self.graph_width),'': str(self.graph_height),'': ''})etree.SubElement(self.graph, '', {'': '' % self.graph_height,'': '','': ''})etree.SubElement(self.graph, '', {'': '' % (self.graph_height, self.graph_width),'': '','': ''})self.draw_x_labels()self.draw_y_labels()", "docstring": "The central logic for drawing the graph.\n\nSets self.graph (the 'g' element in the SVG root)", "id": "f2371:c0:m15"} {"signature": "def max_y_label_width_px(self):", "body": "if self.rotate_y_labels:return self.font_size", "docstring": "Calculate the width of the widest Y label. This will be the\ncharacter height if the Y labels are rotated.", "id": "f2371:c0:m9"} {"signature": "def x_label_offset(self, width):", "body": "return ", "docstring": "Return an offset for drawing the x label. Currently returns 0.", "id": "f2371:c0:m16"} {"signature": "def y_label_offset(self, height):", "body": "return ", "docstring": "Return an offset for drawing the y label. Currently returns 0.", "id": "f2371:c0:m20"} {"signature": "def start_svg(self):", "body": "SVG_NAMESPACE = ''SVG = '' % SVG_NAMESPACENSMAP = {None: SVG_NAMESPACE,'': '','': '',}root_attrs = self._get_root_attributes()self.root = etree.Element(SVG + \"\", attrib=root_attrs, nsmap=NSMAP)if hasattr(self, ''):pi = etree.ProcessingInstruction('','' % self.style_sheet_href)self.root.addprevious(pi)comment_strings = ('','','','','' + '' * ,)list(map(self.root.append, map(etree.Comment, comment_strings)))defs = etree.SubElement(self.root, '')self.add_defs(defs)if not hasattr(self, '') and not self.css_inline:self.root.append(etree.Comment(''))style = etree.SubElement(defs, '', type='')style.text = self.get_stylesheet().cssTextself.root.append(etree.Comment(''))etree.SubElement(self.root, '', {'': str(self.width),'': str(self.height),'': '','': '','': ''})", "docstring": "Base SVG Document Creation", "id": "f2371:c0:m40"} {"signature": "def calculate_bottom_margin(self):", "body": "bb = if self.key and self.key_position == '':bb += len(self.data) * (self.font_size + )bb += if self.show_x_labels:max_x_label_height_px = self.x_label_font_sizeif self.rotate_x_labels:label_lengths = map(len, self.get_x_labels())max_x_label_len = functools.reduce(max, label_lengths)max_x_label_height_px *= * max_x_label_lenbb += max_x_label_height_pxif self.stagger_x_labels:bb += max_x_label_height_px + if self.show_x_title:bb += self.x_title_font_size + self.border_bottom = bb", "docstring": "Calculate the margin in pixels below the plot area, setting\nborder_bottom.", "id": "f2371:c0:m14"} {"signature": "def add_data(self, data):", "body": "super(Plot, self).add_data(data)", "docstring": "Add data to the plot::\n\n # A data set with 1 point: (\"12:30\", 2)\n d1 = [\"12:30\", 2]\n\n # A data set with 2 points: (\"01:00\", 2) and\n # (\"14:20\", 6)\n d2 = [\"01:00\", 2, \"14:20\", 6]\n\n graph.add_data(\n data = d1,\n title = 'One',\n )\n graph.add_data(\n data = d2,\n title = 'Two',\n )\n\nNote that the data must be in (time, value) pairs, and\nthe date format\nmay be any date that is parseable by dateutil.", "id": "f2372:c0:m0"} {"signature": "def process_module(self, node):", "body": "if self.config.file_header:if sys.version_info[] < :pattern = re.compile('' + self.config.file_header, re.LOCALE | re.MULTILINE)else:pattern = re.compile('' + self.config.file_header, re.MULTILINE)content = Nonewith node.stream() as stream:content = stream.read().decode('')matches = pattern.findall(content)if len(matches) != 
:self.add_message('', ,args=self.config.file_header)", "docstring": "Process the astroid node stream.", "id": "f2378:c0:m0"} {"signature": "def format_expected(self, expected, webasset):", "body": "from pyramid.path import AssetResolverif '' not in expected:return expectedif '' in webasset:name = AssetResolver(None).resolve(webasset).abspath()else:name = self.temp.tempdir + '' + webassetif webassets_version > (, ):hashed_filename = hashlib.md5(name.encode(\"\")).hexdigest()else:hashed_filename = hash(name) & (( << ) - )external = '' % hashed_filenamereturn expected % {'': external}", "docstring": "Formats the expected url when the webasset is external", "id": "f2382:c3:m3"} {"signature": "def maybebool(value):", "body": "if isinstance(value, six.string_types) and value.lower() in booly:return asbool(value) return value", "docstring": "If `value` is a string type, attempts to convert it to a boolean\nif it looks like it might be one, otherwise returns the value\nunchanged. The difference between this and\n:func:`pyramid.settings.asbool` is how non-bools are handled: this\nreturns the original value, whereas `asbool` returns False.", "id": "f2383:m0"} {"signature": "def get_webassets_env_from_request(request):", "body": "return request.registry.queryUtility(IWebAssetsEnvironment)", "docstring": "Get the webassets environment in the registry from the request.", "id": "f2383:m5"} {"signature": "def get_rand_bytes(encoding='', l=, avoid=[]):", "body": "return encode(get_rand_str(encoding=encoding, l=l, avoid=avoid),encoding=encoding)", "docstring": "encoding-->str: one of ENCODINGS\nl-->int: length of unicode str\navoid-->list of int: to void (unprintable chars etc)\nReturns-->bytes representing unicode str of the requested encoding", "id": "f2385:m15"} {"signature": "def toBytes(x):", "body": "if isinstance(x, bytes):return xelif isinstance(x, bytearray):return bytes(x)elif isinstance(x, unicode):passelse:return x return encode(x, DEF_ENCODING)", "docstring": "x-->unicode string | bytearray | bytes\nReturns-->bytes\nIf x is unicode, MUST have encoding=latin1", "id": "f2385:m6"} {"signature": "def inputToBytes(func, *args, **kwargs):", "body": "return _convInputs(func, toBytes, *args, **kwargs)", "docstring": "Decorator that converts all arguments to bytes", "id": "f2385:m10"} {"signature": "def inputFromBytes(func, *args, **kwargs):", "body": "return _convInputs(func, fromBytes, *args, **kwargs)", "docstring": "Decorator that converts all arguments to latin1-encoded unicode", "id": "f2385:m9"} {"signature": "def load(self):", "body": "try:self.__openlib()return self.lib is not Noneexcept OSError:return False", "docstring": "Returns-->boolean: whether library could be loaded\nWhen this is called, the library would have been loaded if possible\nIf this is not called, the first invocation of a method in lib\nCAN raise OSError if shared library cannot be loaded", "id": "f2386:c0:m1"} {"signature": "def get_buffer(self, *args):", "body": "res = tuple([self.buffer(x) for x in args])if len(res) == :return Noneelif len(res) == :return res[]else:return res", "docstring": "all args-->_cffi_backend.CDataOwn\nMust be a pointer or an array\nReturns-->buffer (if a SINGLE argument was provided)\n LIST of buffer (if a args was a tuple or list)", "id": "f2387:c0:m1"} {"signature": "def get_bytes(self, *args):", "body": "res = tuple([bytes(self.buffer(x)) for x in args])if len(res) == :return Noneelif len(res) == :return res[]else:return res", "docstring": "all args-->_cffi_backend.CDataOwn\nMust be a pointer 
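The inputToBytes / inputFromBytes records above are thin wrappers around a shared argument converter (_convInputs) whose body is not reproduced here. A minimal sketch of such a decorator, assuming latin-1 as the str/bytes bridge as the toBytes docstring suggests, might be:

import functools

def to_bytes(x, encoding='latin-1'):
    # Pass bytes through, encode str, coerce bytearray/memoryview, leave the rest.
    if isinstance(x, bytes):
        return x
    if isinstance(x, str):
        return x.encode(encoding)
    if isinstance(x, (bytearray, memoryview)):
        return bytes(x)
    return x

def input_to_bytes(func):
    # Decorator converting every positional and keyword argument to bytes
    # before the wrapped function sees them.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        args = [to_bytes(a) for a in args]
        kwargs = {k: to_bytes(v) for k, v in kwargs.items()}
        return func(*args, **kwargs)
    return wrapper

@input_to_bytes
def frame(header, body):
    return header + b':' + body

# frame('hdr', 'payload') -> b'hdr:payload'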
or an array\nReturns-->bytes (if a SINGLE argument was provided)\n LIST of bytes (if a args was a tuple or list)", "id": "f2387:c0:m2"} {"signature": "@click.command()@click.argument('', nargs=-)def list(pattern=()):", "body": "globs = [''.format(p) for p in pattern] + ['']matches = []offset = len(PROJ_ARCHIVE) + for suffix in globs:glob_pattern = os.path.join(PROJ_ARCHIVE, '', '', suffix)matches.append(set(f[offset:] for f in glob.glob(glob_pattern)))matches = reduce(lambda x, y: x.intersection(y),matches)for m in sorted(matches):print(m)", "docstring": "List the contents of the archive directory.", "id": "f2390:m9"} {"signature": "def _mkdir(p):", "body": "isdir = os.path.isdirstack = [os.path.abspath(p)]while not isdir(stack[-]):parent_dir = os.path.dirname(stack[-])stack.append(parent_dir)while stack:p = stack.pop()if not isdir(p):os.mkdir(p)", "docstring": "The equivalent of 'mkdir -p' in shell.", "id": "f2390:m8"} {"signature": "def prettyprint_xml(element):", "body": "return etree.tostring(element, pretty_print=True).decode('')", "docstring": "A rough and dirty way to prettyprint an Element with indention.\n\n:param lxml.etree._Element element: The Element or ElementTree to format.\n:rtype: str\n:returns: A prettyprinted representation of the element.", "id": "f2398:m1"} {"signature": "def _change_resource_record_sets(self, change_set, comment=None):", "body": "body = xml_generators.change_resource_record_set_writer(connection=self,change_set=change_set,comment=comment)root = self._send_request(path='' % change_set.hosted_zone_id,data=body,method='',)e_change_info = root.find('')if e_change_info is None:error = root.find('').find('').textraise Route53Error(error)return parse_change_info(e_change_info)", "docstring": "Given a ChangeSet, POST it to the Route53 API.\n\n.. 
note:: You probably shouldn't be using this method directly,\n as there are convenience methods on the ResourceRecordSet\n sub-classes.\n\n:param change_set.ChangeSet change_set: The ChangeSet object to create\n the XML doc from.\n:keyword str comment: An optional comment to go along with the request.\n:rtype: dict\n:returns: A dict of change info, which contains some details about\n the request.", "id": "f2399:c0:m8"} {"signature": "def _do_autopaginating_api_call(self, path, params, method, parser_func,next_marker_xpath, next_marker_param_name,next_type_xpath=None, parser_kwargs=None):", "body": "if not parser_kwargs:parser_kwargs = {}while True:root = self._send_request(path, params, method)for record in parser_func(root, connection=self, **parser_kwargs):yield recordnext_marker = root.find(next_marker_xpath)if next_marker is None:breakparams[next_marker_param_name] = next_marker.textif next_type_xpath:next_type = root.find(next_type_xpath)params[''] = next_type.text", "docstring": "Given an API method, the arguments passed to it, and a function to\nhand parsing off to, loop through the record sets in the API call\nuntil all records have been yielded.\n\n\n:param str method: The API method on the endpoint.\n:param dict params: The kwargs from the top-level API method.\n:param callable parser_func: A callable that is used for parsing the\n output from the API call.\n:param str next_marker_param_name: The XPath to the marker tag that\n will determine whether we continue paginating.\n:param str next_marker_param_name: The parameter name to manipulate\n in the request data to bring up the next page on the next\n request loop.\n:keyword str next_type_xpath: For the\n py:meth:`list_resource_record_sets_by_zone_id` method, there's\n an additional paginator token. Specifying this XPath looks for it.\n:keyword dict parser_kwargs: Optional dict of additional kwargs to pass\n on to the parser function.\n:rtype: generator\n:returns: Returns a generator that may be returned by the top-level\n API method.", "id": "f2399:c0:m2"} {"signature": "def list_hosted_zones(self, page_chunks=):", "body": "return self._do_autopaginating_api_call(path='',params={'': page_chunks},method='',parser_func=xml_parsers.list_hosted_zones_parser,next_marker_xpath=\"\",next_marker_param_name=\"\",)", "docstring": "List all hosted zones associated with this connection's account. Since\nthis method returns a generator, you can pull as many or as few\nentries as you'd like, without having to query and receive every\nhosted zone you may have.\n\n:keyword int page_chunks: This API call is \"paginated\" behind-the-scenes\n in order to break up large result sets. This number determines\n the maximum number of\n :py:class:`HostedZone `\n instances to retrieve per request. 
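The _do_autopaginating_api_call and list_hosted_zones records above describe marker-based pagination: issue a request, yield the parsed records, and repeat with the continuation marker until it disappears. A simplified generator sketch of that loop follows; the function and parameter names are placeholders, not the module's actual API, and the response is modelled as a dict rather than an XML root.

def paginate(send_request, path, params, parse,
             marker_field='NextMarker', marker_param='marker'):
    # Keep requesting pages and yielding parsed records until the response
    # no longer carries a continuation marker.
    params = dict(params)
    while True:
        response = send_request(path, params)
        for record in parse(response):
            yield record
        marker = response.get(marker_field)
        if marker is None:
            return
        params[marker_param] = marker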
The default is fine for almost\n everyone.\n\n:rtype: generator\n:returns: A generator of :py:class:`HostedZone `\n instances.", "id": "f2399:c0:m3"} {"signature": "def is_alias_record_set(self):", "body": "return False", "docstring": "Checks whether this is an A record in Alias mode.\n\n:rtype: bool\n:returns: ``True`` if this is an A record in Alias mode, and\n ``False`` otherwise.", "id": "f2401:c0:m6"} {"signature": "def is_alias_record_set(self):", "body": "return self.alias_hosted_zone_id or self.alias_dns_name", "docstring": "Checks whether this is an A record in Alias mode.\n\n:rtype: bool\n:returns: ``True`` if this is an A record in Alias mode, and\n ``False`` otherwise.", "id": "f2401:c3:m1"} {"signature": "def __init__(self, alias_hosted_zone_id=None, alias_dns_name=None, *args, **kwargs):", "body": "super(CNAMEResourceRecordSet, self).__init__(*args, **kwargs)self.alias_hosted_zone_id = alias_hosted_zone_idself.alias_dns_name = alias_dns_nameself._initial_vals.update(dict(alias_hosted_zone_id=alias_hosted_zone_id,alias_dns_name=alias_dns_name,))", "docstring": ":keyword str alias_hosted_zone_id: Alias CNAME records have this specified.\n It appears to be the hosted zone ID for the ELB the Alias points at.\n:keyword str alias_dns_name: Alias CNAME records have this specified. It is\n the DNS name for the ELB that the Alias points to.", "id": "f2401:c3:m0"} {"signature": "def __init__(self, alias_hosted_zone_id=None, alias_dns_name=None, *args, **kwargs):", "body": "super(AResourceRecordSet, self).__init__(*args, **kwargs)self.alias_hosted_zone_id = alias_hosted_zone_idself.alias_dns_name = alias_dns_nameself._initial_vals.update(dict(alias_hosted_zone_id=alias_hosted_zone_id,alias_dns_name=alias_dns_name,))", "docstring": ":keyword str alias_hosted_zone_id: Alias A records have this specified.\n It appears to be the hosted zone ID for the ELB the Alias points at.\n:keyword str alias_dns_name: Alias A records have this specified. 
It is\n the DNS name for the ELB that the Alias points to.", "id": "f2401:c1:m0"} {"signature": "def create_hosted_zone_writer(connection, name, caller_reference, comment):", "body": "if not caller_reference:caller_reference = str(uuid.uuid4())e_root = etree.Element(\"\",xmlns=connection._xml_namespace)e_name = etree.SubElement(e_root, \"\")e_name.text = namee_caller_reference = etree.SubElement(e_root, \"\")e_caller_reference.text = caller_referenceif comment:e_config = etree.SubElement(e_root, \"\")e_comment = etree.SubElement(e_config, \"\")e_comment.text = commente_tree = etree.ElementTree(element=e_root)fobj = BytesIO()e_tree.write(fobj, xml_declaration=True, encoding='', method=\"\")return fobj.getvalue().decode('')", "docstring": "Forms an XML string that we'll send to Route53 in order to create\na new hosted zone.\n\n:param Route53Connection connection: The connection instance used to\n query the API.\n:param str name: The name of the hosted zone to create.", "id": "f2402:m0"} {"signature": "def write_change(change):", "body": "action, rrset = changechange_vals = get_change_values(change)e_change = etree.Element(\"\")e_action = etree.SubElement(e_change, \"\")e_action.text = actione_rrset = etree.SubElement(e_change, \"\")e_name = etree.SubElement(e_rrset, \"\")e_name.text = change_vals['']e_type = etree.SubElement(e_rrset, \"\")e_type.text = rrset.rrset_typeif change_vals.get(''):e_set_id = etree.SubElement(e_rrset, \"\")e_set_id.text = change_vals['']if change_vals.get(''):e_weight = etree.SubElement(e_rrset, \"\")e_weight.text = change_vals['']if change_vals.get('') or change_vals.get(''):e_alias_target = etree.SubElement(e_rrset, \"\")e_hosted_zone_id = etree.SubElement(e_alias_target, \"\")e_hosted_zone_id.text = change_vals['']e_dns_name = etree.SubElement(e_alias_target, \"\")e_dns_name.text = change_vals['']if change_vals.get(''):e_weight = etree.SubElement(e_rrset, \"\")e_weight.text = change_vals['']e_ttl = etree.SubElement(e_rrset, \"\")e_ttl.text = str(change_vals[''])if rrset.is_alias_record_set():return e_changee_resource_records = etree.SubElement(e_rrset, \"\")for value in change_vals['']:e_resource_record = etree.SubElement(e_resource_records, \"\")e_value = etree.SubElement(e_resource_record, \"\")e_value.text = valuereturn e_change", "docstring": "Creates an XML element for the change.\n\n:param tuple change: A change tuple from a ChangeSet. Comes in the form\n of ``(action, rrset)``.\n:rtype: lxml.etree._Element\n:returns: A fully baked Change tag.", "id": "f2403:m1"} {"signature": "def create_a_record(self, name, values, ttl=, weight=None, region=None,set_identifier=None, alias_hosted_zone_id=None,alias_dns_name=None):", "body": "self._halt_if_already_deleted()values = locals()del values['']return self._add_record(AResourceRecordSet, **values)", "docstring": "Creates and returns an A record attached to this hosted zone.\n\n:param str name: The fully qualified name of the record to add.\n:param list values: A list of value strings for the record.\n:keyword int ttl: The time-to-live of the record (in seconds).\n:keyword int weight: *For weighted record sets only*. Among resource record\n sets that have the same combination of DNS name and type, a value\n that determines what portion of traffic for the current resource\n record set is routed to the associated location. Ranges from 0-255.\n:keyword str region: *For latency-based record sets*. 
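`create_hosted_zone_writer` and `write_change` above both assemble request bodies the same way: build a root element with `lxml.etree`, hang `SubElement`s off it, and serialise the tree to a UTF-8 string. A small sketch of that style of XML generation; the tag names here are illustrative only, since the real Route53 tag strings are stripped in these records:

```python
from io import BytesIO
from lxml import etree

def build_change_xml(action, name, rrset_type, ttl, values):
    # Illustrative tag names; the actual Route53 tags are omitted in the records above.
    e_change = etree.Element("Change")
    etree.SubElement(e_change, "Action").text = action
    e_rrset = etree.SubElement(e_change, "ResourceRecordSet")
    etree.SubElement(e_rrset, "Name").text = name
    etree.SubElement(e_rrset, "Type").text = rrset_type
    etree.SubElement(e_rrset, "TTL").text = str(ttl)
    e_records = etree.SubElement(e_rrset, "ResourceRecords")
    for value in values:
        e_record = etree.SubElement(e_records, "ResourceRecord")
        etree.SubElement(e_record, "Value").text = value
    # Serialise the tree to a UTF-8 string, as the writer functions above do.
    fobj = BytesIO()
    etree.ElementTree(e_change).write(fobj, xml_declaration=True, encoding="utf-8")
    return fobj.getvalue().decode("utf-8")

print(build_change_xml("CREATE", "example.com.", "A", 300, ["203.0.113.1"]))
```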
The Amazon EC2 region\n where the resource that is specified in this resource record set\n resides.\n:keyword str set_identifier: *For weighted and latency resource record\n sets only*. An identifier that differentiates among multiple\n resource record sets that have the same combination of DNS name\n and type. 1-128 chars.\n:keyword str alias_hosted_zone_id: Alias A records have this specified.\n It appears to be the hosted zone ID for the ELB the Alias points at.\n:keyword str alias_dns_name: Alias A records have this specified. It is\n the DNS name for the ELB that the Alias points to.\n:rtype: tuple\n:returns: A tuple in the form of ``(rrset, change_info)``, where\n ``rrset`` is the newly created\n :py:class:`AResourceRecordSet `\n instance.", "id": "f2405:c0:m7"} {"signature": "def create_spf_record(self, name, values, ttl=):", "body": "self._halt_if_already_deleted()values = locals()del values['']return self._add_record(SPFResourceRecordSet, **values)", "docstring": "Creates a SPF record attached to this hosted zone.\n\n:param str name: The fully qualified name of the record to add.\n:param list values: A list of value strings for the record.\n:keyword int ttl: The time-to-live of the record (in seconds).\n:rtype: tuple\n:returns: A tuple in the form of ``(rrset, change_info)``, where\n ``rrset`` is the newly created SPFResourceRecordSet instance.", "id": "f2405:c0:m13"} {"signature": "def _halt_if_already_deleted(self):", "body": "if self._is_deleted:raise AlreadyDeletedError(\"\")", "docstring": "Convenience method used to raise an AlreadyDeletedError exception if\nthis HostedZone has been deleted.\n\n:raises: AlreadyDeletedError", "id": "f2405:c0:m5"} {"signature": "def _add_record(self, record_set_class, name, values, ttl=, weight=None,region=None,set_identifier=None, alias_hosted_zone_id=None,alias_dns_name=None):", "body": "self._halt_if_already_deleted()rrset_kwargs = dict(connection=self.connection,zone_id=self.id,name=name,ttl=ttl,records=values,weight=weight,region=region,set_identifier=set_identifier,)if alias_hosted_zone_id or alias_dns_name:rrset_kwargs.update(dict(alias_hosted_zone_id=alias_hosted_zone_id,alias_dns_name=alias_dns_name))rrset = record_set_class(**rrset_kwargs)cset = ChangeSet(connection=self.connection, hosted_zone_id=self.id)cset.add_change('', rrset)change_info = self.connection._change_resource_record_sets(cset)return rrset, change_info", "docstring": "Convenience method for creating ResourceRecordSets. 
Most of the calls\nare basically the same, this saves on repetition.\n\n:rtype: tuple\n:returns: A tuple in the form of ``(rrset, change_info)``, where\n ``rrset`` is the newly created ResourceRecordSet sub-class\n instance.", "id": "f2405:c0:m6"} {"signature": "def connect(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):", "body": "from route53.connection import Route53Connectionreturn Route53Connection(aws_access_key_id,aws_secret_access_key,**kwargs)", "docstring": "Instantiates and returns a :py:class:`route53.connection.Route53Connection`\ninstance, which is how you'll start your interactions with the Route 53\nAPI.\n\n:keyword str aws_access_key_id: Your AWS Access Key ID\n:keyword str aws_secret_access_key: Your AWS Secret Access Key\n\n:rtype: :py:class:`route53.connection.Route53Connection`\n:return: A connection to Amazon's Route 53", "id": "f2406:m0"} {"signature": "def _send_post_request(self, path, data, headers):", "body": "raise NotImplementedError", "docstring": "Transport sub-classes need to override this.\n\nSends the POST request to the Route53 endpoint.\n\n:param str path: The path to tack on to the endpoint URL for\n the query.\n:param data: Either a dict, or bytes.\n:type data: dict or bytes\n:param dict headers: A dict of headers to send with the request.\n:rtype: str\n:returns: The body of the response.", "id": "f2407:c0:m6"} {"signature": "@propertydef endpoint(self):", "body": "return self.connection._endpoint", "docstring": ":rtype: str\n:returns: The Route53 API endpoint to query against.", "id": "f2407:c0:m1"} {"signature": "def parse_rrset(e_rrset, connection, zone_id):", "body": "kwargs = {'': connection,'': zone_id,}rrset_type = Nonefor e_field in e_rrset:tag_name = e_field.tag.split('')[]field_text = e_field.textif tag_name == '':rrset_type = field_textcontinueelif tag_name == '':alias_hosted_zone_id, alias_dns_name = parse_rrset_alias(e_field)kwargs[''] = alias_hosted_zone_idkwargs[''] = alias_dns_namekwargs[''] = Nonecontinueelif tag_name == '':kwargs[''] = parse_rrset_record_values(e_field)continuekw_name = RRSET_TAG_TO_KWARG_MAP[tag_name]kwargs[kw_name] = field_textif not rrset_type:raise Route53Error(\"\")if '' not in kwargs:kwargs[''] = []RRSetSubclass = RRSET_TYPE_TO_RSET_SUBCLASS_MAP[rrset_type]return RRSetSubclass(**kwargs)", "docstring": "This a parser that allows the passing of any valid ResourceRecordSet\ntag. 
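The `connect` helper above is the documented entry point to this library: it returns a `Route53Connection`, whose generator methods (such as `list_hosted_zones`, described earlier) are then iterated lazily. A short usage sketch based only on the docstrings above, assuming the `route53` package is installed; the credentials are placeholders:

```python
import route53

# Placeholder credentials; substitute real AWS keys.
conn = route53.connect(
    aws_access_key_id="YOUR_ACCESS_KEY_ID",
    aws_secret_access_key="YOUR_SECRET_ACCESS_KEY",
)

# list_hosted_zones returns a generator, so only as many pages as needed
# are actually requested from the API.
for zone in conn.list_hosted_zones():
    print(zone)
```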
It will spit out the appropriate ResourceRecordSet object for the tag.\n\n:param lxml.etree._Element e_rrset: The root node of the etree parsed\n response from the API.\n:param Route53Connection connection: The connection instance used to\n query the API.\n:param str zone_id: The zone ID of the HostedZone these rrsets belong to.\n:rtype: ResourceRecordSet\n:returns: An instantiated ResourceRecordSet object.", "id": "f2408:m2"} {"signature": "def list_hosted_zones_parser(root, connection):", "body": "zones = root.find('')for zone in zones:yield parse_hosted_zone(zone, connection)", "docstring": "Parses the API responses for the\n:py:meth:`route53.connection.Route53Connection.list_hosted_zones` method.\n\n:param lxml.etree._Element root: The root node of the etree parsed\n response from the API.\n:param Route53Connection connection: The connection instance used to\n query the API.\n:rtype: HostedZone\n:returns: A generator of fully formed HostedZone instances.", "id": "f2414:m0"} {"signature": "def parse_hosted_zone(e_zone, connection):", "body": "kwargs = {}for e_field in e_zone:tag_name = e_field.tag.split('')[]field_text = e_field.textif tag_name == '':e_comment = e_field.find('')kwargs[''] = e_comment.text if e_comment is not None else Nonecontinueelif tag_name == '':field_text = field_text.strip('')kw_name = HOSTED_ZONE_TAG_TO_KWARG_MAP[tag_name]kwargs[kw_name] = field_textreturn HostedZone(connection, **kwargs)", "docstring": "This a common parser that allows the passing of any valid HostedZone\ntag. It will spit out the appropriate HostedZone object for the tag.\n\n:param lxml.etree._Element e_zone: The root node of the etree parsed\n response from the API.\n:param Route53Connection connection: The connection instance used to\n query the API.\n:rtype: HostedZone\n:returns: An instantiated HostedZone object.", "id": "f2415:m0"} {"signature": "def read_logfile(log_file):", "body": "dirname = os.path.dirname(log_file) + ''with open(log_file, '') as f:rlog = f.readlines()hashind = [i for i, n in enumerate(rlog) if '' in n]pathread = re.compile('')paths = (pathread.match(l).groups() for l in rlog[hashind[] + :hashind[-]] if pathread.match(l))paths = {k: os.path.join(dirname, v) for k, v in paths}logread = re.compile('')runargs = []for line in rlog[hashind[] + :]:fname, args, kwargs = (logread.match(line).groups())runargs.append((fname ,{'': eval(args), '': eval(kwargs)}))if fname == '':runargs[-][-][''][''] = ''runargs[-][-][''][''] = Nonerunargs[-][-][''][''] = paths['']if '' in paths:runargs[-][-][''][''] = paths['']return runargs, paths", "docstring": "Reads an latools analysis.log file, and returns dicts of arguments.\n\nParameters\n----------\nlog_file : str\n Path to an analysis.log file produced by latools.\n\nReturns\n-------\nrunargs, paths : tuple\n Two dictionaries. runargs contains all the arguments required to run each step\n of analysis in the form (function_name, {'args': (), 'kwargs': {}}). 
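`parse_hosted_zone` and `parse_rrset` above share one parsing pattern: walk the children of an element, strip the XML namespace from each tag, look the local tag name up in a tag-to-keyword map, and collect the text values into a kwargs dict. A stripped-down sketch of that pattern; `TAG_TO_KWARG` is a hypothetical stand-in for the real (omitted) mapping constants:

```python
from lxml import etree

# Hypothetical tag-to-kwarg map; the real HOSTED_ZONE_TAG_TO_KWARG_MAP is not
# reproduced in the records above.
TAG_TO_KWARG = {"Id": "id", "Name": "name", "CallerReference": "caller_reference"}

def parse_element(e_zone):
    kwargs = {}
    for e_field in e_zone:
        tag_name = e_field.tag.split("}")[-1]  # drop any '{namespace}' prefix
        if tag_name in TAG_TO_KWARG:
            kwargs[TAG_TO_KWARG[tag_name]] = e_field.text
    return kwargs

xml = b"<HostedZone><Id>/hostedzone/Z123</Id><Name>example.com.</Name></HostedZone>"
print(parse_element(etree.fromstring(xml)))  # {'id': '/hostedzone/Z123', 'name': 'example.com.'}
```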
paths contains\n the locations of the data directory and the SRM database used for analysis.", "id": "f2424:m2"} {"signature": "def to_mass_fraction(molar_ratio, massfrac_denominator, numerator_mass, denominator_mass):", "body": "return molar_ratio * massfrac_denominator * numerator_mass / denominator_mass", "docstring": "Converts per-mass concentrations to molar elemental ratios.\n\nBe careful with units.\n\nParameters\n----------\nmolar_ratio : float or array-like\n The molar ratio of elements.\nmassfrac_denominator : float or array-like\n The mass fraction of the denominator element\nnumerator_mass, denominator_mass : float or array-like\n The atomic mass of the numerator and denominator.\n\nReturns\n-------\nfloat or array-like : The mass fraction of the numerator element.", "id": "f2425:m3"} {"signature": "def elements(all_isotopes=True):", "body": "el = pd.read_pickle(pkgrs.resource_filename('', ''))if all_isotopes:return el.set_index('')else:def wmean(g):return (g.atomic_weight * g.percent).sum() / iel = el.groupby('').apply(wmean)iel.name = ''return iel", "docstring": "Loads a DataFrame of all elements and isotopes.\n\nScraped from https://www.webelements.com/\n\nReturns\n-------\npandas DataFrame with columns (element, atomic_number, isotope, atomic_weight, percent)", "id": "f2425:m0"} {"signature": "def to_molar_ratio(massfrac_numerator, massfrac_denominator, numerator_mass, denominator_mass):", "body": "return (massfrac_numerator / numerator_mass) / (massfrac_denominator / denominator_mass)", "docstring": "Converts per-mass concentrations to molar elemental ratios.\n\nBe careful with units.\n\nParameters\n----------\nnumerator_mass, denominator_mass : float or array-like\n The atomic mass of the numerator and denominator.\nmassfrac_numerator, massfrac_denominator : float or array-like\n The per-mass fraction of the numnerator and denominator.\n\nReturns\n-------\nfloat or array-like : The molar ratio of elements in the material", "id": "f2425:m2"} {"signature": "def autorange_plot(t, sig, gwin=, swin=None, win=,on_mult=(, ), off_mult=(, ),nbin=, thresh=None):", "body": "if swin is None:swin = gwin // sigs = fastsmooth(sig, swin)bins = sig.size // nbinkde_x = np.linspace(sig.min(), sig.max(), bins)kde = gaussian_kde(sigs)yd = kde.pdf(kde_x)mins = findmins(kde_x, yd) if thresh is not None:mins = [thresh]if len(mins) > :bkg = sigs < (mins[]) else:bkg = np.ones(sig.size, dtype=bool)fbkg = bkgfsig = ~bkgg = abs(fastgrad(sigs, gwin)) zeros = bool_2_indices(fsig)if zeros is not None:zeros = zeros.flatten()lohi = []pgs = []excl = []tps = []failed = []for z in zeros: if z - win < :lo = gwin // hi = int(z + win)elif z + win > (len(sig) - gwin // ):lo = int(z - win)hi = len(sig) - gwin // else:lo = int(z - win)hi = int(z + win)xs = t[lo:hi]ys = g[lo:hi]lohi.append([lo, hi])mid = (hi + lo) // tp = sigs[mid + ] > sigs[mid - ] tps.append(tp)c = t[z] width = (t[] - t[]) * try:pg, _ = curve_fit(gauss, xs, ys,p0=(np.nanmax(ys),c,width),sigma=(xs - c)** + )pgs.append(pg)fwhm = abs( * pg[-] * np.sqrt( * np.log()))if tp:lim = np.array([-fwhm, fwhm]) * on_mult + pg[]else:lim = np.array([-fwhm, fwhm]) * off_mult + pg[]excl.append(lim)fbkg[(t > lim[]) & (t < lim[])] = Falsefsig[(t > lim[]) & (t < lim[])] = Falsefailed.append(False)except RuntimeError:failed.append(True)lohi.append([np.nan, np.nan])pgs.append([np.nan, np.nan, np.nan])excl.append([np.nan, np.nan])tps.append(tp)passelse:zeros = []nrows = + len(zeros) // + len(zeros) % fig, axs = plt.subplots(nrows, , figsize=(, + * nrows))ax1, ax2, ax3, 
ax4 = axs.flat[:]ax4.set_visible(False)for ax in [ax1, ax3]:p = ax.axes.get_position()p2 = [p.x0, p.y0, p.width * , p.height]ax.axes.set_position(p2)p = ax3.axes.get_position()p2 = [p.x0, p.y0 + * p.height, p.width, p.height]ax3.axes.set_position(p2)p = ax2.axes.get_position()p2 = [p.x0 + p.width * , p.y0, p.width * , p.height]ax2.axes.set_position(p2)ax1.plot(t, sig, color='', lw=)ax1.set_xticklabels([])ax1.set_ylabel('')ax3.plot(t, g, color='', lw=)ax3.set_xlabel('')ax3.set_ylabel('')ax2.fill_betweenx(kde_x, yd, color=(, , , ))ax2.plot(yd, kde_x, color='')ax2.set_ylim(ax1.get_ylim())ax2.set_yticklabels([])ax2.set_xlabel('')for ax in [ax1, ax2]:ax.axhline(mins[], color='', ls='', alpha=)if len(zeros) > :for z in zeros:ax1.axvline(t[z], color='', alpha=)ax3.axvline(t[z], color='', alpha=)n = for (lo, hi), lim, tp, pg, fail, ax in zip(lohi, excl, tps, pgs, failed, axs.flat[:]):ax3.axvspan(t[lo], t[hi], color='', alpha=, zorder=-)x = t[lo:hi]y = g[lo:hi]ys = sig[lo:hi]ax.scatter(x, y, color='', marker='', zorder=-, s=)ax.set_yticklabels([])ax.set_ylim(rangecalc(y))tax = ax.twinx()tax.plot(x, ys, color='', alpha=, zorder=-)tax.set_yticklabels([])tax.set_ylim(rangecalc(ys))xn = np.linspace(x.min(), x.max(), )ax.plot(xn, gauss(xn, *pg), color='', alpha=)ax.axvline(pg[], color='', alpha=)ax.axvspan(*lim, color='', alpha=, zorder=-)ax1.axvspan(*lim, color='', alpha=, zorder=-)if tp:ax.text(, , ''.format(n), ha='',va='', transform=ax.transAxes)else:ax.text(, , ''.format(n), ha='',va='', transform=ax.transAxes)if ax.is_last_row():ax.set_xlabel('')if ax.is_first_col():ax.set_ylabel('')if ax.is_last_col():tax.set_ylabel('')if fail:ax.axes.set_facecolor((, , , ))ax.text(, , '', ha='', va='',fontsize=, color=(, , , ), transform=ax.transAxes)n += if len(zeros) % == :axs.flat[-].set_visible = Falsereturn fig, axs", "docstring": "Function for visualising the autorange mechanism.\n\nParameters\n----------\nt : array-like\n Independent variable (usually time).\nsig : array-like\n Dependent signal, with distinctive 'on' and 'off' regions.\ngwin : int\n The window used for calculating first derivative.\n Defaults to 7.\nswin : int\n The window ised for signal smoothing. If None, gwin // 2.\nwin : int\n The width (c +/- win) of the transition data subsets.\n Defaults to 20.\non_mult and off_mult : tuple, len=2\n Control the width of the excluded transition regions, which is defined\n relative to the peak full-width-half-maximum (FWHM) of the transition\n gradient. 
The region n * FHWM below the transition, and m * FWHM above\n the tranision will be excluded, where (n, m) are specified in `on_mult`\n and `off_mult`.\n `on_mult` and `off_mult` apply to the off-on and on-off transitions,\n respectively.\n Defaults to (1.5, 1) and (1, 1.5).\nnbin : ind\n Used to calculate the number of bins in the data histogram.\n bins = len(sig) // nbin\n\nReturns\n-------\nfig, axes", "id": "f2426:m5"} {"signature": "def crossplot(dat, keys=None, lognorm=True, bins=, figsize=(, ),colourful=True, focus_stage=None, denominator=None,mode='', cmap=None, **kwargs):", "body": "if keys is None:keys = list(dat.keys())numvar = len(keys)if figsize[] < * numvar:figsize = [ * numvar] * fig, axes = plt.subplots(nrows=numvar, ncols=numvar,figsize=(, ))fig.subplots_adjust(hspace=, wspace=)for ax in axes.flat:ax.xaxis.set_visible(False)ax.yaxis.set_visible(False)if ax.is_first_col():ax.yaxis.set_ticks_position('')if ax.is_last_col():ax.yaxis.set_ticks_position('')if ax.is_first_row():ax.xaxis.set_ticks_position('')if ax.is_last_row():ax.xaxis.set_ticks_position('')if colourful:cmlist = ['', '', '', '','', '', '', '','', '', '', '','', '', '', '', '', '']else:cmlist = ['']if cmap is None and mode == '':cmap = {k: '' for k in dat.keys()}while len(cmlist) < len(keys):cmlist *= focus = {k: nominal_values(dat[k]) for k in keys}udict = {a: unitpicker(np.nanmean(focus[a]),focus_stage=focus_stage,denominator=denominator) for a in keys}rdict = {a: (np.nanmin(focus[a] * udict[a][]),np.nanmax(focus[a] * udict[a][])) for a in keys}for i, j in tqdm(zip(*np.triu_indices_from(axes, k=)), desc='',total=sum(range(len(keys)))):ai = keys[i]aj = keys[j]pi = focus[ai] * udict[ai][]pj = focus[aj] * udict[aj][]if lognorm:norm = mpl.colors.LogNorm()else:norm = Noneif mode == '':pi = pi[~np.isnan(pi)]pj = pj[~np.isnan(pj)]axes[i, j].hist2d(pj, pi, bins,norm=norm,cmap=plt.get_cmap(cmlist[i]))axes[j, i].hist2d(pi, pj, bins,norm=norm,cmap=plt.get_cmap(cmlist[j]))elif mode == '':axes[i, j].scatter(pj, pi, s=,color=cmap[ai], lw=, edgecolor='',alpha=)axes[j, i].scatter(pi, pj, s=,color=cmap[aj], lw=, edgecolor='',alpha=)else:raise ValueError(\"\")axes[i, j].set_ylim(*rdict[ai])axes[i, j].set_xlim(*rdict[aj])axes[j, i].set_ylim(*rdict[aj])axes[j, i].set_xlim(*rdict[ai])for a, n in zip(keys, np.arange(len(keys))):axes[n, n].annotate(a + '' + udict[a][], (, ),xycoords='',ha='', va='', fontsize=)axes[n, n].set_xlim(*rdict[a])axes[n, n].set_ylim(*rdict[a])for i, j in zip(range(numvar), itertools.cycle((-, ))):axes[j, i].xaxis.set_visible(True)for label in axes[j, i].get_xticklabels():label.set_rotation()axes[i, j].yaxis.set_visible(True)return fig, axes", "docstring": "Plot analytes against each other.\n\nThe number of plots is n**2 - n, where n = len(keys).\n\nParameters\n----------\ndat : dict\n A dictionary of key: data pairs, where data is the same\n length in each entry.\nkeys : optional, array_like or str\n The keys of dat to plot. 
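Looking back at the `to_molar_ratio` and `to_mass_fraction` helpers earlier in this section, the two conversions are algebraic inverses of each other, so a round trip should recover the input. A short worked check using the formulas exactly as given in those records; the atomic masses are approximate illustrative values, not ones taken from the package's element table:

```python
def to_molar_ratio(massfrac_numerator, massfrac_denominator, numerator_mass, denominator_mass):
    # (mol numerator) / (mol denominator), from per-mass fractions
    return (massfrac_numerator / numerator_mass) / (massfrac_denominator / denominator_mass)

def to_mass_fraction(molar_ratio, massfrac_denominator, numerator_mass, denominator_mass):
    # invert the ratio back to a mass fraction of the numerator element
    return molar_ratio * massfrac_denominator * numerator_mass / denominator_mass

# Approximate atomic masses (g/mol), for illustration only.
mg, ca = 24.305, 40.078
mgca = to_molar_ratio(massfrac_numerator=0.002, massfrac_denominator=0.4,
                      numerator_mass=mg, denominator_mass=ca)
print(mgca)                                  # ~0.00824 mol/mol
print(to_mass_fraction(mgca, 0.4, mg, ca))   # recovers 0.002
```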
Defaults to all keys.\nlognorm : bool\n Whether or not to log normalise the colour scale\n of the 2D histogram.\nbins : int\n The number of bins in the 2D histogram.\nfigsize : tuple\ncolourful : bool\n\nReturns\n-------\n(fig, axes)", "id": "f2426:m3"} {"signature": "def histograms(dat, keys=None, bins=, logy=False, cmap=None, ncol=):", "body": "if keys is None:keys = dat.keys()ncol = int(ncol)nrow = calc_nrow(len(keys), ncol)fig, axs = plt.subplots(nrow, , figsize=[ncol * , nrow * ])pn = for k, ax in zip(keys, axs.flat):tmp = nominal_values(dat[k])x = tmp[~np.isnan(tmp)]if cmap is not None:c = cmap[k]else:c = (, , , )ax.hist(x, bins=bins, color=c)if logy:ax.set_yscale('')ylab = ''else:ylab = ''ax.set_ylim(, ax.get_ylim()[])if ax.is_first_col():ax.set_ylabel(ylab)ax.set_yticklabels([])ax.text(, , k, ha='', va='', transform=ax.transAxes)pn += for ax in axs.flat[pn:]:ax.set_visible(False)fig.tight_layout()return fig, axs", "docstring": "Plot histograms of all items in dat.\n\nParameters\n----------\ndat : dict\n Data in {key: array} pairs.\nkeys : arra-like\n The keys in dat that you want to plot. If None,\n all are plotted.\nbins : int\n The number of bins in each histogram (default = 25)\nlogy : bool\n If true, y axis is a log scale.\ncmap : dict\n The colours that the different items should be. If None,\n all are grey.\n\nReturns\n-------\nfig, axes", "id": "f2426:m4"} {"signature": "def unitpicker(a, llim=, denominator=None, focus_stage=None):", "body": "if not isinstance(a, (int, float)):a = nominal_values(a)a = np.percentile(a[~np.isnan(a)], )if denominator is not None:pd = pretty_element(denominator)else:pd = ''if focus_stage == '':udict = {: '' + pd,: '' + pd,: '' + pd,: '' + pd,: '' + pd,: '' + pd}elif focus_stage == '':udict = {: '' + pd,: '' + pd,: '' + pd,: '' + pd,: '' + pd,: '' + pd}elif focus_stage in ('', '', ''):udict = udict = {: '',: '',: '',: '',: '',: ''}else:udict = {: '', : '', : '', : '', : '', : ''}a = abs(a)n = if a < llim:while a < llim:a *= n += return float(**n), udict[n]", "docstring": "Determines the most appropriate plotting unit for data.\n\nParameters\n----------\na : float or array-like\n number to optimise. 
If array like, the 25% quantile is optimised.\nllim : float\n minimum allowable value in scaled data.\n\nReturns\n-------\n(float, str)\n (multiplier, unit)", "id": "f2428:m4"} {"signature": "def get_total_time_span(d):", "body": "tmax = for di in d.values():if di.uTime.max() > tmax:tmax = di.uTime.max()return tmax", "docstring": "Returns total length of analysis.", "id": "f2428:m3"} {"signature": "def fastgrad(a, win=):", "body": "if win % == :win += wins = rolling_window(a, win, '')a = map(lambda x: np.polyfit(np.arange(win), x, )[], wins)return np.array(list(a))", "docstring": "Returns rolling - window gradient of a.\n\nFunction to efficiently calculate the rolling gradient of a numpy\narray using 'stride_tricks' to split up a 1D array into an ndarray of\nsub - sections of the original array, of dimensions [len(a) - win, win].\n\nParameters\n----------\na : array_like\n The 1D array to calculate the rolling gradient of.\nwin : int\n The width of the rolling window.\n\nReturns\n-------\narray_like\n Gradient of a, assuming as constant integer x - scale.", "id": "f2428:m16"} {"signature": "def get_date(datetime, time_format=None):", "body": "if time_format is None:t = du.parser.parse(datetime)else:t = dt.datetime.strftime(datetime, time_format)return t", "docstring": "Return a datetime oject from a string, with optional time format.\n\nParameters\n----------\ndatetime : str\n Date-time as string in any sensible format.\ntime_format : datetime str (optional)\n String describing the datetime format. If missing uses\n dateutil.parser to guess time format.", "id": "f2428:m1"} {"signature": "def get_total_n_points(d):", "body": "n = for di in d.values():n += len(di)return n", "docstring": "Returns the total number of data points in values of dict.\n\nParamters\n---------\nd : dict", "id": "f2428:m2"} {"signature": "def gauss_weighted_stats(x, yarray, x_new, fwhm):", "body": "sigma = fwhm / ( * np.sqrt( * np.log()))mask = np.zeros((x.size, yarray.shape[], x_new.size))for i, xni in enumerate(x_new):mask[:, :, i] = gauss(x[:, np.newaxis], , xni, sigma)nmask = mask / mask.sum() av = (nmask * yarray[:, :, np.newaxis]).sum() diff = np.power(av - yarray[:, :, np.newaxis], )std = np.sqrt((diff * nmask).sum())se = std / np.sqrt(mask.sum())return av, std, se", "docstring": "Calculate gaussian weigted moving mean, SD and SE.\n\nParameters\n----------\nx : array-like\n The independent variable\nyarray : (n,m) array\n Where n = x.size, and m is the number of\n dependent variables to smooth.\nx_new : array-like\n The new x-scale to interpolate the data\nfwhm : int\n FWHM of the gaussian kernel.\n\nReturns\n-------\n(mean, std, se) : tuple", "id": "f2431:m5"} {"signature": "def gauss(x, *p):", "body": "A, mu, sigma = preturn A * np.exp(- * (-mu + x)** / sigma**)", "docstring": "Gaussian function.\n\n Parameters\n ----------\n x : array_like\n Independent variable.\n *p : parameters unpacked to A, mu, sigma\n A = amplitude, mu = centre, sigma = width\n\n Return\n ------\n array_like\n gaussian descriped by *p.", "id": "f2431:m6"} {"signature": "def calc_window_mean_std(s, min_points, ind=None):", "body": "max_points = np.sum(~np.isnan(s))n_points = max_points - min_pointsmean = np.full((n_points, s.size), np.nan)std = np.full((n_points, s.size), np.nan)if ind is None:ind = ~np.isnan(s)else:ind = ind & ~np.isnan(s)s = s[ind]for i, w in enumerate(range(min_points, s.size)):r = rolling_window(s, w, pad=np.nan)mean[i, ind] = r.sum() / wstd[i, ind] = (((r - mean[i, ind][:, np.newaxis])**).sum() / (w - ))**return 
mean, std", "docstring": "Apply fn to all contiguous regions in s that have at least min_points.", "id": "f2432:m1"} {"signature": "def threshold(values, threshold):", "body": "values = nominal_values(values)return (values < threshold, values >= threshold)", "docstring": "Return boolean arrays where a >= and < threshold.\n\nParameters\n----------\nvalues : array-like\n Array of real values.\nthreshold : float\n Threshold value\n\nReturns\n-------\n(below, above) : tuple or boolean arrays", "id": "f2433:m0"} {"signature": "def defrag(filt, threshold=, mode=''):", "body": "if bool_2_indices(filt) is None:return filtif mode == '':inds = bool_2_indices(~filt) + rep = Trueif mode == '':inds = bool_2_indices(filt) + rep = Falserem = (np.diff(inds) <= threshold)[:, ]cfilt = filt.copy()if any(rem):for lo, hi in inds[rem]:cfilt[lo:hi] = repreturn cfilt", "docstring": "'Defragment' a filter.\n\nParameters\n----------\nfilt : boolean array\n A filter\nthreshold : int\n Consecutive values equal to or below this threshold\n length are considered fragments, and will be removed.\nmode : str\n Wheter to change False fragments to True ('include')\n or True fragments to False ('exclude')\n\nReturns\n-------\ndefragmented filter : boolean array", "id": "f2433:m2"} {"signature": "def fuzzmatch(self, fuzzkey, multi=False):", "body": "keys, ratios = np.array([(f, seqm(None, fuzzkey, f).ratio()) for f in self.components.keys()]).Tmratio = max(ratios)if multi:return keys[ratios == mratio]else:if sum(ratios == mratio) == :return keys[ratios == mratio][]else:raise ValueError(\"\".format(fuzzkey) + ''.join(keys[ratios == mratio]) + \"\")", "docstring": "Identify a filter by fuzzy string matching.\n\nPartial ('fuzzy') matching performed by `fuzzywuzzy.fuzzy.ratio`\n\nParameters\n----------\nfuzzkey : str\n A string that partially matches one filter name more than the others.\n\nReturns\n-------\nThe name of the most closely matched filter. : str", "id": "f2434:c0:m9"} {"signature": "def off(self, analyte=None, filt=None):", "body": "if isinstance(analyte, str):analyte = [analyte]if isinstance(filt, (int, float)):filt = [filt]elif isinstance(filt, str):filt = self.fuzzmatch(filt, multi=True)if analyte is None:analyte = self.analytesif filt is None:filt = list(self.index.values())for a in analyte:for f in filt:if isinstance(f, int):f = self.index[f]try:self.switches[a][f] = Falseexcept KeyError:f = self.fuzzmatch(f, multi=False)self.switches[a][f] = Falsereturn", "docstring": "Turn off specified filter(s) for specified analyte(s).\n\nParameters\n----------\nanalyte : optional, str or array_like\n Name or list of names of analytes.\n Defaults to all analytes.\nfilt : optional. 
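`fastgrad` and `calc_window_mean_std` above both lean on a `rolling_window` helper that uses numpy stride tricks to view a 1-D array as overlapping windows, so a per-window statistic (a fitted slope, a mean, a standard deviation) can be computed without a Python loop. A self-contained sketch of the idea using numpy's own `sliding_window_view` in place of that helper; the edge padding that the real functions apply to keep the output the same length is omitted:

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def rolling_gradient(a, win=7):
    """Rolling least-squares slope of `a`, assuming unit x-spacing."""
    wins = sliding_window_view(a, win)      # shape (len(a) - win + 1, win)
    x = np.arange(win)
    # Slope of a least-squares line fitted to each window.
    return np.polyfit(x, wins.T, 1)[0]

a = np.cumsum(np.ones(20))            # a straight line with slope 1
print(rolling_gradient(a, win=5))     # slopes are all ~1

wins = sliding_window_view(a, 5)
print(wins.mean(axis=1), wins.std(axis=1, ddof=1))   # rolling mean and SD
```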
int, list of int or str\n Number(s) or partial string that corresponds to filter name(s).\n\nReturns\n-------\nNone", "id": "f2434:c0:m7"} {"signature": "def clean(self):", "body": "for f in sorted(self.components.keys()):unused = not any(self.switches[a][f] for a in self.analytes)if unused:self.remove(f)", "docstring": "Remove unused filters.", "id": "f2434:c0:m5"} {"signature": "def grab_filt(self, filt, analyte=None):", "body": "if isinstance(filt, str):if filt in self.components:if analyte is None:return self.components[filt]else:if self.switches[analyte][filt]:return self.components[filt]else:try:ind = self.make_fromkey(filt)except KeyError:print((\"\"\"\"))elif isinstance(filt, dict):try:ind = self.make_fromkey(filt[analyte])except ValueError:print((\"\"\"\"\"\"))elif filt:ind = self.make(analyte)else:ind = ~np.zeros(self.size, dtype=bool)return ind", "docstring": "Flexible access to specific filter using any key format.\n\nParameters\n----------\nf : str, dict or bool\n either logical filter expression, dict of expressions,\n or a boolean\nanalyte : str\n name of analyte the filter is for.\n\nReturns\n-------\narray_like\n boolean filter", "id": "f2434:c0:m12"} {"signature": "def on(self, analyte=None, filt=None):", "body": "if isinstance(analyte, str):analyte = [analyte]if isinstance(filt, (int, float)):filt = [filt]elif isinstance(filt, str):filt = self.fuzzmatch(filt, multi=True)if analyte is None:analyte = self.analytesif filt is None:filt = list(self.index.values())for a in analyte:for f in filt:if isinstance(f, (int, float)):f = self.index[int(f)]try:self.switches[a][f] = Trueexcept KeyError:f = self.fuzzmatch(f, multi=False)self.switches[a][f] = Truereturn", "docstring": "Turn on specified filter(s) for specified analyte(s).\n\nParameters\n----------\nanalyte : optional, str or array_like\n Name or list of names of analytes.\n Defaults to all analytes.\nfilt : optional. 
int, str or array_like\n Name/number or iterable names/numbers of filters.\n\nReturns\n-------\nNone", "id": "f2434:c0:m6"} {"signature": "def cluster_meanshift(data, bandwidth=None, bin_seeding=False, **kwargs):", "body": "if bandwidth is None:bandwidth = cl.estimate_bandwidth(data)ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding, **kwargs)ms.fit(data)labels = ms.labels_return labels, [np.nan]", "docstring": "Identify clusters using Meanshift algorithm.\n\nParameters\n----------\ndata : array_like\n array of size [n_samples, n_features].\nbandwidth : float or None\n If None, bandwidth is estimated automatically using\n sklean.cluster.estimate_bandwidth\nbin_seeding : bool\n Setting this option to True will speed up the algorithm.\n See sklearn documentation for full description.\n\nReturns\n-------\ndict\n boolean array for each identified cluster.", "id": "f2435:m0"} {"signature": "def cluster_DBSCAN(data, eps=None, min_samples=None,n_clusters=None, maxiter=, **kwargs):", "body": "if n_clusters is None:if eps is None:eps = db = cl.DBSCAN(eps=eps, min_samples=min_samples, **kwargs).fit(data)else:clusters = eps_temp = / niter = while clusters < n_clusters:clusters_last = clusterseps_temp *= db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)clusters = (len(set(db.labels_)) -( if - in db.labels_ else ))if clusters < clusters_last:eps_temp *= / db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)clusters = (len(set(db.labels_)) -( if - in db.labels_ else ))warnings.warn(('''''').format(n_clusters, clusters, eps_temp))breakniter += if niter == maxiter:warnings.warn(('''''''').format(maxiter, n_clusters))breaklabels = db.labels_core_samples_mask = np.zeros_like(labels)core_samples_mask[db.core_sample_indices_] = Truereturn labels, core_samples_mask", "docstring": "Identify clusters using DBSCAN algorithm.\n\nParameters\n----------\ndata : array_like\n array of size [n_samples, n_features].\neps : float\n The minimum 'distance' points must be apart for them to be in the\n same cluster. Defaults to 0.3. Note: If the data are normalised\n (they should be for DBSCAN) this is in terms of total sample\n variance. Normalised data have a mean of 0 and a variance of 1.\nmin_samples : int\n The minimum number of samples within distance `eps` required\n to be considered as an independent cluster.\nn_clusters : int\n The number of clusters expected. 
If specified, `eps` will be\n incrementally reduced until the expected number of clusters is\n found.\nmaxiter : int\n The maximum number of iterations DBSCAN will run.\n\nReturns\n-------\ndict\n boolean array for each identified cluster and core samples.", "id": "f2435:m2"} {"signature": "def pca_plot(pca, dt, xlabs=None, mode='', lognorm=True):", "body": "nc = pca.n_componentsf = np.arange(pca.n_features_)cs = list(itertools.combinations(range(nc), ))ind = ~np.apply_along_axis(any, , np.isnan(dt))cylim = (pca.components_.min(), pca.components_.max())yd = cylim[] - cylim[]fig, axs = plt.subplots(nc, nc, figsize=[ * nc, nc * ], tight_layout=True)for x, y in zip(*np.triu_indices(nc)):if x == y:tax = axs[x, y]tax.bar(f, pca.components_[x], )tax.set_xticks([])tax.axhline(, zorder=-, c=(,,,))tax.set_ylim(cylim[] - * yd,cylim[] + * yd)for xi, yi, lab in zip(f, pca.components_[x], xlabs):if yi > :yo = yd * va = ''else:yo = yd * -va = ''tax.text(xi, yi + yo, lab, ha='', va=va, rotation=, fontsize=)else:xv = dt[ind, x]yv = dt[ind, y]if mode == '':axs[x, y].scatter(xv, yv, alpha=)axs[y, x].scatter(yv, xv, alpha=)if mode == '':if lognorm:norm = mpl.colors.LogNorm()else:norm = Noneaxs[x, y].hist2d(xv, yv, , cmap=plt.cm.Blues, norm=norm)axs[y, x].hist2d(yv, xv, , cmap=plt.cm.Blues, norm=norm)if x == :axs[y, x].set_ylabel(''.format(y + ))if y == nc - :axs[y, x].set_xlabel(''.format(x + ))return fig, axs, xv, yv", "docstring": "Plot a fitted PCA, and all components.", "id": "f2437:m1"} {"signature": "def read_data(data_file, dataformat, name_mode):", "body": "with open(data_file) as f:lines = f.readlines()if '' in dataformat.keys():meta = Bunch()for k, v in dataformat[''].items():try:out = re.search(v[-], lines[int(k)]).groups()except:raise ValueError(''.format(v[-], lines[int(k)]))for i in np.arange(len(v[])):meta[v[][i]] = out[i]else:meta = {}if name_mode == '':sample = os.path.basename(data_file).split('')[]elif name_mode == '':sample = meta['']else:sample = name_modecolumns = np.array(lines[dataformat['']['']].strip().split(dataformat['']['']))if '' in dataformat[''].keys():pr = re.compile(dataformat[''][''])analytes = [pr.match(c).groups()[] for c in columns if pr.match(c)]if '' in dataformat.keys():with open(data_file) as f:fbuffer = f.read()for k, v in dataformat[''].items():fbuffer = re.sub(k, v, fbuffer)read_data = np.genfromtxt(BytesIO(fbuffer.encode()),**dataformat['']).Telse:read_data = np.genfromtxt(data_file,**dataformat['']).Tdind = np.zeros(read_data.shape[], dtype=bool)for a in analytes:dind[columns == a] = Truedata = Bunch()data[''] = read_data[dataformat['']['']]if '' in dataformat['']:if isinstance(dataformat[''][''], (float, int)):time_mult = dataformat['']['']elif isinstance(dataformat[''][''], str):unit_multipliers = {'': /,'': /,'': }try:time_mult = unit_multipliers[dataformat['']['']]except:raise ValueError(\"\")data[''] *= time_multdata[''] = Bunch(zip(analytes, read_data[dind]))data[''] = np.nansum(read_data[dind], )return sample, analytes, data, meta", "docstring": "Load data_file described by a dataformat dict.\n\nParameters\n----------\ndata_file : str\n Path to data file, including extension.\ndataformat : dict\n A dataformat dict, see example below.\nname_mode : str\n How to identyfy sample names. If 'file_names' uses the\n input name of the file, stripped of the extension. If\n 'metadata_names' uses the 'name' attribute of the 'meta'\n sub-dictionary in dataformat. 
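`cluster_meanshift` and `cluster_DBSCAN` above are thin wrappers around scikit-learn: data arrive as an `[n_samples, n_features]` array (normalised, in the DBSCAN case), and the labels come straight from the fitted estimator, with `-1` marking noise points for DBSCAN. A minimal sketch of the equivalent direct calls on synthetic two-blob data:

```python
import numpy as np
from sklearn import cluster as cl
from sklearn.preprocessing import scale

# Two synthetic blobs as stand-in analyte data, shape [n_samples, n_features].
rng = np.random.default_rng(1)
data = np.vstack([rng.normal(0, 0.3, (100, 2)), rng.normal(3, 0.3, (100, 2))])
data = scale(data)   # DBSCAN 'eps' is defined on normalised data in the docstring above

# MeanShift: the bandwidth can be estimated automatically, as the wrapper does.
bandwidth = cl.estimate_bandwidth(data)
ms_labels = cl.MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(data).labels_

# DBSCAN: points within `eps` of at least `min_samples` neighbours form a cluster.
db = cl.DBSCAN(eps=0.3, min_samples=10).fit(data)
db_labels = db.labels_               # -1 marks noise points
print(set(ms_labels), set(db_labels))
```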
If any other str, uses this\n str as the sample name.\n\nExample\n-------\n>>>\n{'genfromtext_args': {'delimiter': ',',\n 'skip_header': 4}, # passed directly to np.genfromtxt\n 'column_id': {'name_row': 3, # which row contains the column names\n 'delimiter': ',', # delimeter between column names\n 'timecolumn': 0, # which column contains the 'time' variable\n 'pattern': '([A-z]{1,2}[0-9]{1,3})'}, # a regex pattern which captures the column names\n 'meta_regex': { # a dict of (line_no: ([descriptors], [regexs])) pairs\n 0: (['path'], '(.*)'),\n 2: (['date', 'method'], # MUST include date\n '([A-Z][a-z]+ [0-9]+ [0-9]{4}[ ]+[0-9:]+ [amp]+).* ([A-z0-9]+\\.m)')\n }\n}\n\nReturns\n-------\nsample, analytes, data, meta : tuple", "id": "f2439:m0"} {"signature": "def autorange(t, sig, gwin=, swin=None, win=,on_mult=(, ), off_mult=(, ),nbin=, transform='', thresh=None):", "body": "failed = []if swin is not None:sigs = fastsmooth(sig, swin)else:sigs = sigif transform == '':tsigs = np.log10(sigs)else:tsigs = sigsif thresh is None:bins = kde_x = np.linspace(tsigs.min(), tsigs.max(), bins)kde = gaussian_kde(tsigs)yd = kde.pdf(kde_x)mins = findmins(kde_x, yd) if len(mins) > :bkg = tsigs < (mins[]) else:bkg = np.ones(tsigs.size, dtype=bool)else:bkg = tsigs < threshfbkg = bkgfsig = ~bkgzeros = bool_2_indices(fsig)g = abs(fastgrad(sigs, gwin)) if zeros is not None:zeros = zeros.flatten()for z in zeros: if z - win < :lo = gwin // hi = int(z + win)elif z + win > (len(sig) - gwin // ):lo = int(z - win)hi = len(sig) - gwin // else:lo = int(z - win)hi = int(z + win)xs = t[lo:hi]ys = g[lo:hi]mid = (hi + lo) // tp = sigs[mid + ] > sigs[mid - ] c = t[z] width = (t[] - t[]) * try:pg, _ = curve_fit(gauss, xs, ys,p0=(np.nanmax(ys),c,width),sigma=(xs - c)** + )fwhm = abs( * pg[-] * np.sqrt( * np.log()))if tp:lim = np.array([-fwhm, fwhm]) * on_mult + pg[]else:lim = np.array([-fwhm, fwhm]) * off_mult + pg[]fbkg[(t > lim[]) & (t < lim[])] = Falsefsig[(t > lim[]) & (t < lim[])] = Falseexcept RuntimeError:failed.append([c, tp])passftrn = ~fbkg & ~fsigif len(failed) > :trns = t[bool_2_indices(ftrn)]tr_mean = (trns[:, ] - trns[:, ]).mean() / for f, tp in failed:if tp:ind = (t >= f - tr_mean *on_mult[]) & (t <= f + tr_mean * on_mult[])else:ind = (t >= f - tr_mean *off_mult[]) & (t <= f + tr_mean * off_mult[])fsig[ind] = Falsefbkg[ind] = Falseftrn[ind] = Falsereturn fbkg, fsig, ftrn, [f[] for f in failed]", "docstring": "Automatically separates signal and background in an on/off data stream.\n\n**Step 1: Thresholding.**\nThe background signal is determined using a gaussian kernel density\nestimator (kde) of all the data. Under normal circumstances, this\nkde should find two distinct data distributions, corresponding to\n'signal' and 'background'. The minima between these two distributions\nis taken as a rough threshold to identify signal and background\nregions. Any point where the trace crosses this thrshold is identified\nas a 'transition'.\n\n**Step 2: Transition Removal.**\nThe width of the transition regions between signal and background are\nthen determined, and the transitions are excluded from analysis. The\nwidth of the transitions is determined by fitting a gaussian to the\nsmoothed first derivative of the analyte trace, and determining its\nwidth at a point where the gaussian intensity is at at `conf` time the\ngaussian maximum. These gaussians are fit to subsets of the data\ncentered around the transitions regions determined in Step 1, +/- `win`\ndata points. 
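Step 1 of the autorange algorithm described here thresholds the trace at the minimum of a kernel density estimate that separates the 'background' and 'signal' modes. A compact sketch of that step with scipy on a synthetic on/off trace; `argrelmin` stands in for the package's own `findmins` helper, and the log transform and smoothing are skipped for brevity:

```python
import numpy as np
from scipy.stats import gaussian_kde
from scipy.signal import argrelmin

# Synthetic on/off trace: low background with a high 'ablation' block.
rng = np.random.default_rng(0)
sig = np.concatenate([rng.normal(1, 0.1, 300), rng.normal(10, 0.5, 300),
                      rng.normal(1, 0.1, 300)])

kde = gaussian_kde(sig)
kde_x = np.linspace(sig.min(), sig.max(), 512)
yd = kde.pdf(kde_x)

# First minimum of the density between the two modes -> rough threshold.
minima = argrelmin(yd)[0]
thresh = kde_x[minima[0]]

bkg = sig < thresh   # background points
print(f"threshold ~ {thresh:.2f}, background fraction = {bkg.mean():.2f}")
```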
The peak is further isolated by finding the minima and\nmaxima of a second derivative within this window, and the gaussian is\nfit to the isolated peak.\n\nParameters\n----------\nt : array-like\n Independent variable (usually time).\nsig : array-like\n Dependent signal, with distinctive 'on' and 'off' regions.\ngwin : int\n The window used for calculating first derivative.\n Defaults to 7.\nswin : int\n The window used for signal smoothing. If None, ``gwin // 2``.\nwin : int\n The width (c +/- win) of the transition data subsets.\n Defaults to 20.\non_mult and off_mult : tuple, len=2\n Control the width of the excluded transition regions, which is defined\n relative to the peak full-width-half-maximum (FWHM) of the transition\n gradient. The region n * FHWM below the transition, and m * FWHM above\n the tranision will be excluded, where (n, m) are specified in `on_mult`\n and `off_mult`.\n `on_mult` and `off_mult` apply to the off-on and on-off transitions,\n respectively.\n Defaults to (1.5, 1) and (1, 1.5).\ntransform : str\n How to transform the data. Default is 'log'.\n\nReturns\n-------\nfbkg, fsig, ftrn, failed : tuple\n where fbkg, fsig and ftrn are boolean arrays the same length as sig,\n that are True where sig is background, signal and transition, respecively.\n failed contains a list of transition positions where gaussian fitting\n has failed.", "id": "f2440:m0"} {"signature": "def autorange_components(t, sig, transform='', gwin=, swin=None,win=, on_mult=(, ), off_mult=(, ),thresh=None):", "body": "failed = []if swin is not None:sigs = fastsmooth(sig, swin)else:sigs = sigif transform == '':tsigs = np.log10(sigs)tsig = np.log10(sig)else:tsigs = sigstsig = sigif thresh is None:bins = kde_x = np.linspace(tsigs.min(), tsigs.max(), bins)kde = gaussian_kde(tsigs)yd = kde.pdf(kde_x)mins = findmins(kde_x, yd) if len(mins) > :bkg = tsigs < (mins[]) thresh = mins[]else:bkg = np.ones(tsigs.size, dtype=bool)else:bkg = tsigs < threshfbkg = bkgfsig = ~bkgzeros = bool_2_indices(fsig)g = abs(fastgrad(sigs, gwin)) if zeros is not None:zeros = zeros.flatten()trans = dict(zeros=zeros.flatten(),lohi=[],pgs=[],excl=[],tps=[],failed=[],xs=[],ys=[])for z in zeros: if z - win < :lo = gwin // hi = int(z + win)elif z + win > (len(sig) - gwin // ):lo = int(z - win)hi = len(sig) - gwin // else:lo = int(z - win)hi = int(z + win)xs = t[lo:hi]ys = g[lo:hi]trans[''].append(xs)trans[''].append(ys)trans[''].append([lo, hi])mid = (hi + lo) // tp = sigs[mid + ] > sigs[mid - ] trans[''].append(tp)c = t[z] width = (t[] - t[]) * try:pg, _ = curve_fit(gauss, xs, ys,p0=(np.nanmax(ys),c,width),sigma=(xs - c)** + )trans[''].append(pg)fwhm = abs( * pg[-] * np.sqrt( * np.log()))if tp:lim = np.array([-fwhm, fwhm]) * on_mult + pg[]else:lim = np.array([-fwhm, fwhm]) * off_mult + pg[]trans[''].append(lim)fbkg[(t > lim[]) & (t < lim[])] = Falsefsig[(t > lim[]) & (t < lim[])] = Falsefailed.append(False)except RuntimeError:failed.append(True)trans[''].append([np.nan, np.nan])trans[''].append([np.nan, np.nan, np.nan])trans[''].append([np.nan, np.nan])trans[''].append(tp)passelse:zeros = []return t, sig, sigs, tsig, tsigs, kde_x, yd, g, trans, thresh", "docstring": "Returns the components underlying the autorange algorithm.\n\nReturns\n-------\nt : array-like\n Time axis (independent variable)\nsig : array-like\n Raw signal (dependent variable)\nsigs : array-like\n Smoothed signal (swin)\ntsig : array-like\n Transformed raw signal (transform)\ntsigs : array-like\n Transformed smoothed signal (transform, swin)\nkde_x : array-like\n 
kernel density estimate of smoothed signal.\nyd : array-like\n bins of kernel density estimator.\ng : array-like\n gradient of smoothed signal (swin, gwin)\ntrans : dict\n per-transition data.\nthresh : float\n threshold identified from kernel density plot", "id": "f2440:m1"} {"signature": "@_logdef filter_gradient_threshold(self, analyte, win, threshold, recalc=True):", "body": "params = locals()del(params[''])if recalc or not self.grads_calced:self.grads = calc_grads(self.Time, self.focus,[analyte], win)self.grads_calced = Truebelow, above = filters.threshold(abs(self.grads[analyte]), threshold)setn = self.filt.maxset + self.filt.add(analyte + '',below,''.format(threshold) + analyte,params, setn=setn)self.filt.add(analyte + '',above,''.format(threshold) + analyte,params, setn=setn)", "docstring": "Apply gradient threshold filter.\n\nGenerates threshold filters for the given analytes above and below\nthe specified threshold.\n\nTwo filters are created with prefixes '_above' and '_below'.\n '_above' keeps all the data above the threshold.\n '_below' keeps all the data below the threshold.\n\ni.e. to select data below the threshold value, you should turn the\n'_above' filter off.\n\nParameters\n----------\nanalyte : str\n Description of `analyte`.\nthreshold : float\n Description of `threshold`.\nwin : int\n Window used to calculate gradients (n points)\nrecalc : bool\n Whether or not to re-calculate the gradients.\n\nReturns\n-------\nNone", "id": "f2443:c0:m13"} {"signature": "def mkrngs(self):", "body": "bbool = bool_2_indices(self.bkg)if bbool is not None:self.bkgrng = self.Time[bbool]else:self.bkgrng = [[np.nan, np.nan]]sbool = bool_2_indices(self.sig)if sbool is not None:self.sigrng = self.Time[sbool]else:self.sigrng = [[np.nan, np.nan]]tbool = bool_2_indices(self.trn)if tbool is not None:self.trnrng = self.Time[tbool]else:self.trnrng = [[np.nan, np.nan]]self.ns = np.zeros(self.Time.size)n = for i in range(len(self.sig) - ):if self.sig[i]:self.ns[i] = nif self.sig[i] and ~self.sig[i + ]:n += self.n = int(max(self.ns)) return", "docstring": "Transform boolean arrays into list of limit pairs.\n\nGets Time limits of signal/background boolean arrays and stores them as\nsigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in\nthe analyse object.", "id": "f2443:c0:m5"} {"signature": "def get_params(self):", "body": "outputs = ['','','','','']out = {}for o in outputs:out[o] = getattr(self, o)out[''] = self.filt.paramsout[''] = self.filt.sequenceout[''] = self.filt.make_keydict()return out", "docstring": "Returns paramters used to process data.\n\nReturns\n-------\ndict\n dict of analysis parameters", "id": "f2443:c0:m29"} {"signature": "@_logdef filter_trim(self, start=, end=, filt=True):", "body": "params = locals()del(params[''])f = self.filt.grab_filt(filt)nf = filters.trim(f, start, end)self.filt.add('',nf,''.format(start, end),params, setn=self.filt.maxset + )", "docstring": "Remove points from the start and end of filter regions.\n\nParameters\n----------\nstart, end : int\n The number of points to remove from the start and end of\n the specified filter.\nfilt : valid filter string or bool\n Which filter to trim. 
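`mkrngs` above converts the boolean signal/background/transition arrays into start/stop limit pairs via a `bool_2_indices` helper. The underlying trick is to diff the (padded) boolean array and read off the False-to-True and True-to-False edges. A simplified sketch of that conversion; `bool_to_ranges` is my stand-in name, and end-of-array handling is kept deliberately minimal:

```python
import numpy as np

def bool_to_ranges(mask):
    """Return (start, stop) index pairs for each contiguous True run in `mask`."""
    m = np.concatenate([[False], mask, [False]]).astype(int)
    edges = np.diff(m)
    starts = np.flatnonzero(edges == 1)        # False -> True transitions
    stops = np.flatnonzero(edges == -1) - 1    # True -> False transitions (inclusive)
    return np.column_stack([starts, stops])

sig = np.array([0, 0, 1, 1, 1, 0, 1, 1, 0, 0], dtype=bool)
t = np.arange(sig.size) * 0.1                  # a time axis, as in mkrngs
ranges = bool_to_ranges(sig)
print(ranges)      # [[2 4], [6 7]]
print(t[ranges])   # time limits of each signal region
```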
If True, applies to currently active\n filters.", "id": "f2443:c0:m19"} {"signature": "@_logdef ablation_times(self):", "body": "ats = {}for n in np.arange(self.n) + :t = self.Time[self.ns == n]ats[n - ] = t.max() - t.min()return ats", "docstring": "Function for calculating the ablation time for each\nablation.\n\nReturns\n-------\n dict of times for each ablation.", "id": "f2443:c0:m11"} {"signature": "@_logdef tplot(self, analytes=None, figsize=[, ], scale='', filt=None,ranges=False, stats=False, stat='', err='',focus_stage=None, err_envelope=False, ax=None):", "body": "return plot.tplot(self=self, analytes=analytes, figsize=figsize, scale=scale, filt=filt,ranges=ranges, stats=stats, stat=stat, err=err,focus_stage=focus_stage, err_envelope=err_envelope, ax=ax)", "docstring": "Plot analytes as a function of Time.\n\nParameters\n----------\nanalytes : array_like\n list of strings containing names of analytes to plot.\n None = all analytes.\nfigsize : tuple\n size of final figure.\nscale : str or None\n 'log' = plot data on log scale\nfilt : bool, str or dict\n False: plot unfiltered data.\n True: plot filtered data over unfiltered data.\n str: apply filter key to all analytes\n dict: apply key to each analyte in dict. Must contain all\n analytes plotted. Can use self.filt.keydict.\nranges : bool\n show signal/background regions.\nstats : bool\n plot average and error of each trace, as specified by `stat` and\n `err`.\nstat : str\n average statistic to plot.\nerr : str\n error statistic to plot.\n\nReturns\n-------\nfigure, axis", "id": "f2443:c0:m23"} {"signature": "@_logdef filter_exclude_downhole(self, threshold, filt=True):", "body": "f = self.filt.grab_filt(filt)if self.n == :nfilt = filters.exclude_downhole(f, threshold)else:nfilt = []for i in range(self.n):nf = self.ns == i + nfilt.append(filters.exclude_downhole(f & nf, threshold))nfilt = np.apply_along_axis(any, , nfilt)self.filt.add(name=''.format(threshold),filt=nfilt,info=''.format(threshold),params=(threshold, filt))", "docstring": "Exclude all points down-hole (after) the first excluded data.\n\nParameters\n----------\nthrehold : int\n The minimum number of contiguous excluded data points\n that must exist before downhole exclusion occurs.\nfile : valid filter string or bool\n Which filter to consider. If True, applies to currently active\n filters.", "id": "f2443:c0:m20"} {"signature": "@_logdef filter_threshold(self, analyte, threshold):", "body": "params = locals()del(params[''])below, above = filters.threshold(self.focus[analyte], threshold)setn = self.filt.maxset + self.filt.add(analyte + '',below,''.format(threshold) + analyte,params, setn=setn)self.filt.add(analyte + '',above,''.format(threshold) + analyte,params, setn=setn)", "docstring": "Apply threshold filter.\n\nGenerates threshold filters for the given analytes above and below\nthe specified threshold.\n\nTwo filters are created with prefixes '_above' and '_below'.\n '_above' keeps all the data above the threshold.\n '_below' keeps all the data below the threshold.\n\ni.e. 
to select data below the threshold value, you should turn the\n'_above' filter off.\n\nParameters\n----------\nanalyte : TYPE\n Description of `analyte`.\nthreshold : TYPE\n Description of `threshold`.\n\nReturns\n-------\nNone", "id": "f2443:c0:m12"} {"signature": "@_logdef gradient_crossplot(self, analytes=None, win=, lognorm=True,bins=, filt=False, samples=None,subset=None, figsize=(, ), save=False,colourful=True, mode='', recalc=True, **kwargs):", "body": "if analytes is None:analytes = self.analytesif self.focus_stage in ['', '']:analytes = [a for a in analytes if self.internal_standard not in a]try:analytes = sorted(analytes, key=lambda x: float(re.findall('', x)[]))except IndexError:analytes = sorted(analytes)samples = self._get_samples(subset)self.get_gradients(analytes=analytes, win=win, filt=filt, subset=subset, recalc=recalc)fig, axes = plot.crossplot(dat=self.gradients, keys=analytes, lognorm=lognorm,bins=bins, figsize=figsize, colourful=colourful,focus_stage=self.focus_stage, cmap=self.cmaps,denominator=self.internal_standard, mode=mode)if save:fig.savefig(self.report_dir + '', dpi=)return fig, axes", "docstring": "Plot analyte gradients against each other.\n\nParameters\n----------\nanalytes : optional, array_like or str\n The analyte(s) to plot. Defaults to all analytes.\nlognorm : bool\n Whether or not to log normalise the colour scale\n of the 2D histogram.\nbins : int\n The number of bins in the 2D histogram.\nfilt : str, dict or bool\n Either logical filter expression contained in a str,\n a dict of expressions specifying the filter string to\n use for each analyte or a boolean. Passed to `grab_filt`.\nfigsize : tuple\n Figure size (width, height) in inches.\nsave : bool or str\n If True, plot is saves as 'crossplot.png', if str plot is\n saves as str.\ncolourful : bool\n Whether or not the plot should be colourful :).\nmode : str\n 'hist2d' (default) or 'scatter'\nrecalc : bool\n Whether to re-calculate the gradients, or use existing gradients.\n\nReturns\n-------\n(fig, axes)", "id": "f2444:c0:m46"} {"signature": "@_logdef ratio(self, internal_standard=None):", "body": "if '' not in self.stages_complete:raise RuntimeError('')if internal_standard is not None:self.internal_standard = internal_standardself.minimal_analytes.update([internal_standard])with self.pbar.set(total=len(self.data), desc='') as prog:for s in self.data.values():s.ratio(internal_standard=self.internal_standard)prog.update()self.stages_complete.update([''])self.focus_stage = ''return", "docstring": "Calculates the ratio of all analytes to a single analyte.\n\nParameters\n----------\ninternal_standard : str\n The name of the analyte to divide all other analytes\n by.\n\nReturns\n-------\nNone", "id": "f2444:c0:m13"} {"signature": "def get_focus(self, filt=False, samples=None, subset=None, nominal=False):", "body": "if samples is not None:subset = self.make_subset(samples)samples = self._get_samples(subset)focus = {'': []}focus.update({a: [] for a in self.analytes})for sa in samples:s = self.data[sa]focus[''].append(s.uTime)ind = s.filt.grab_filt(filt)for a in self.analytes:tmp = s.focus[a].copy()tmp[~ind] = np.nanfocus[a].append(tmp)if nominal:self.focus.update({k: nominal_values(np.concatenate(v)) for k, v, in focus.items()})else:self.focus.update({k: np.concatenate(v) for k, v, in focus.items()})return", "docstring": "Collect all data from all samples into a single array.\nData from standards is not collected.\n\nParameters\n----------\nfilt : str, dict or bool\n Either logical filter expression 
contained in a str,\n a dict of expressions specifying the filter string to\n use for each analyte or a boolean. Passed to `grab_filt`.\nsamples : str or list\n which samples to get\nsubset : str or int\n which subset to get\n\nReturns\n-------\nNone", "id": "f2444:c0:m42"} {"signature": "@_logdef bkg_calc_weightedmean(self, analytes=None, weight_fwhm=None,n_min=, n_max=None, cstep=None,bkg_filter=False, f_win=, f_n_lim=, focus_stage=''):", "body": "if analytes is None:analytes = self.analytesself.bkg = Bunch()elif isinstance(analytes, str):analytes = [analytes]if weight_fwhm is None:weight_fwhm = self.get_background(n_min=n_min, n_max=n_max,bkg_filter=bkg_filter,f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)if '' not in self.bkg.keys():if cstep is None:cstep = weight_fwhm / elif cstep > weight_fwhm:warnings.warn(\"\" +\"\")bkg_t = np.linspace(,self.max_time,self.max_time // cstep)self.bkg[''] = Bunch()self.bkg[''][''] = bkg_tmean, std, stderr = gauss_weighted_stats(self.bkg[''].uTime,self.bkg[''].loc[:, analytes].values,self.bkg[''][''],fwhm=weight_fwhm)for i, a in enumerate(analytes):self.bkg[''][a] = {'': mean[i],'': std[i],'': stderr[i]}", "docstring": "Background calculation using a gaussian weighted mean.\n\nParameters\n----------\nanalytes : str or iterable\n Which analyte or analytes to calculate.\nweight_fwhm : float\n The full-width-at-half-maximum of the gaussian used\n to calculate the weighted average.\nn_min : int\n Background regions with fewer than n_min points\n will not be included in the fit.\ncstep : float or None\n The interval between calculated background points.\nfilter : bool\n If true, apply a rolling filter to the isolated background regions\n to exclude regions with anomalously high values. If True, two parameters\n alter the filter's behaviour:\nf_win : int\n The size of the rolling window\nf_n_lim : float\n The number of standard deviations above the rolling mean\n to set the threshold.\nfocus_stage : str\n Which stage of analysis to apply processing to. \n Defaults to 'despiked' if present, or 'rawdata' if not. 
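`bkg_calc_weightedmean` (together with `gauss_weighted_stats`, shown earlier) smooths the isolated background measurements onto a regular time grid by taking a mean weighted with a gaussian kernel of the given FWHM, evaluated at each grid point. A compact sketch of that weighting for a single analyte on synthetic data; only the mean is computed here, not the SD and SE that the real function also returns:

```python
import numpy as np

def gauss_weighted_mean(x, y, x_new, fwhm):
    """Gaussian-weighted moving mean of y(x), evaluated at x_new."""
    sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
    # weights[i, j]: influence of observation i on grid point j
    weights = np.exp(-0.5 * ((x[:, None] - x_new[None, :]) / sigma) ** 2)
    weights /= weights.sum(axis=0)            # normalise per grid point
    return (weights * y[:, None]).sum(axis=0)

# Synthetic drifting background, sampled irregularly in time.
rng = np.random.default_rng(2)
t_bkg = np.sort(rng.uniform(0, 600, 80))
bkg = 100 + 0.05 * t_bkg + rng.normal(0, 5, t_bkg.size)

t_grid = np.linspace(0, 600, 13)              # a cstep of ~50 s
print(gauss_weighted_mean(t_bkg, bkg, t_grid, fwhm=120))
```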
\n Can be one of:\n * 'rawdata': raw data, loaded from csv file.\n * 'despiked': despiked data.\n * 'signal'/'background': isolated signal and background data.\n Created by self.separate, after signal and background\n regions have been identified by self.autorange.\n * 'bkgsub': background subtracted data, created by \n self.bkg_correct\n * 'ratios': element ratio data, created by self.ratio.\n * 'calibrated': ratio data calibrated to standards, created by self.calibrate.", "id": "f2444:c0:m8"} {"signature": "def minimal_export(self, target_analytes=None, path=None):", "body": "if target_analytes is None:target_analytes = self.analytesif isinstance(target_analytes, str):target_analytes = [target_analytes]self.minimal_analytes.update(target_analytes)zip_archive = Falseif path is None:path = self.export_dir + ''if path.endswith(''):path = path.replace('', '')zip_archive = Trueif not os.path.isdir(path):os.mkdir(path)self._minimal_export_traces(path + '', analytes=self.minimal_analytes)log_header = ['' %(time.strftime('')),'']if hasattr(self, ''):log_header.append('')els = np.unique([re.sub('', '', a) for a in self.minimal_analytes])srmdat = []for e in els:srmdat.append(self.srmdat.loc[self.srmdat.element == e, :])srmdat = pd.concat(srmdat)with open(path + '', '') as f:f.write(srmdat.to_csv())if hasattr(self, ''):with open(path + '', '') as f:f.write(self.custom_stat_functions)log_header.append('')log_header.append('')lss = [(i, l) for i, l in enumerate(self.log) if '' in l]rep = re.compile(\"\")for i, l in lss:self.log[i] = rep.sub(r'' + str(self.stats_calced) + r'', l)self.save_log(path, '', header=log_header)if zip_archive:utils.zipdir(directory=path, delete=True)return", "docstring": "Exports analysis parameters, standard info and a minimal dataset,\nwhich can be imported by another user.\n\nParameters\n----------\ntarget_analytes : str or iterable\n Which analytes to include in the export. If specified, the export\n will contain these analytes, and all other analytes used during\n data processing (e.g. during filtering). If not specified, \n all analytes are exported.\npath : str\n Where to save the minimal export. 
\n If it ends with .zip, a zip file is created.\n If it's a folder, all data are exported to a folder.", "id": "f2444:c0:m60"} {"signature": "@_logdef calibrate(self, analytes=None, drift_correct=True,srms_used=['', '', ''],zero_intercept=True, n_min=, reload_srm_database=False):", "body": "if analytes is None:analytes = self.analytes[self.analytes != self.internal_standard]elif isinstance(analytes, str):analytes = [analytes]if not hasattr(self, ''):self.srm_id_auto(srms_used=srms_used, n_min=n_min, reload_srm_database=reload_srm_database)if not hasattr(self, ''):gTime = self.stdtab.gTime.unique()self.calib_params = pd.DataFrame(columns=pd.MultiIndex.from_product([analytes, ['']]),index=gTime)calib_analytes = self.srmtabs.index.get_level_values().unique()if zero_intercept:fn = lambda x, m: x * melse:fn = lambda x, m, c: x * m + cfor a in calib_analytes:if zero_intercept:if (a, '') in self.calib_params:self.calib_params.drop((a, ''), , inplace=True)if drift_correct:for g in self.stdtab.gTime.unique():ind = idx[a, :, :, g]if self.srmtabs.loc[ind].size == :continuemeas = self.srmtabs.loc[ind, '']srm = self.srmtabs.loc[ind, '']merr = self.srmtabs.loc[ind, '']serr = self.srmtabs.loc[ind, '']sigma = np.sqrt(merr** + serr**)if len(meas) > :p, cov = curve_fit(fn, meas, srm, sigma=sigma)pe = unc.correlated_values(p, cov) self.calib_params.loc[g, (a, '')] = pe[]if not zero_intercept:self.calib_params.loc[g, (a, '')] = pe[]else:self.calib_params.loc[g, (a, '')] = (un.uarray(srm, serr) / un.uarray(meas, merr))[]if not zero_intercept:self.calib_params.loc[g, (a, '')] = else:ind = idx[a, :, :, :]meas = self.srmtabs.loc[ind, '']srm = self.srmtabs.loc[ind, '']merr = self.srmtabs.loc[ind, '']serr = self.srmtabs.loc[ind, '']sigma = np.sqrt(merr** + serr**)if len(meas) > :p, cov = curve_fit(fn, meas, srm, sigma=sigma)pe = unc.correlated_values(p, cov) self.calib_params.loc[:, (a, '')] = pe[]if not zero_intercept:self.calib_params.loc[:, (a, '')] = pe[]else:self.calib_params.loc[:, (a, '')] = (un.uarray(srm, serr) / un.uarray(meas, merr))[]if not zero_intercept:self.calib_params.loc[:, (a, '')] = if self.calib_params.index.min() == :self.calib_params.drop(, inplace=True)self.calib_params.drop(self.calib_params.index.max(), inplace=True)self.calib_params.loc[, :] = self.calib_params.loc[self.calib_params.index.min(), :]maxuT = np.max([d.uTime.max() for d in self.data.values()]) self.calib_params.loc[maxuT, :] = self.calib_params.loc[self.calib_params.index.max(), :]self.calib_params.sort_index(, inplace=True)self.calib_params.sort_index(, inplace=True)self.calib_ps = Bunch()for a in analytes:self.calib_ps[a] = {'': un_interp1d(self.calib_params.index.values,self.calib_params.loc[:, (a, '')].values)}if not zero_intercept:self.calib_ps[a][''] = un_interp1d(self.calib_params.index.values,self.calib_params.loc[:, (a, '')].values)with self.pbar.set(total=len(self.data), desc='') as prog:for d in self.data.values():d.calibrate(self.calib_ps, analytes)prog.update()markers = '' if not hasattr(self, ''):self.srms_used = set(srms_used)else:self.srms_used.update(srms_used)self.srm_mdict = {k: markers[i] for i, k in enumerate(self.srms_used)}self.stages_complete.update([''])self.focus_stage = ''return", "docstring": "Calibrates the data to measured SRM values.\n\nAssumes that y intercept is zero.\n\nParameters\n---------- \nanalytes : str or iterable\n Which analytes you'd like to calibrate. 
Defaults to all.\ndrift_correct : bool\n Whether to pool all SRM measurements into a single calibration,\n or vary the calibration through the run, interpolating\n coefficients between measured SRMs.\nsrms_used : str or iterable\n Which SRMs have been measured. Must match names given in\n SRM data file *exactly*.\nn_min : int\n The minimum number of data points an SRM measurement\n must have to be included.\n\nReturns\n-------\nNone", "id": "f2444:c0:m18"} {"signature": "@_logdef trace_plots(self, analytes=None, samples=None, ranges=False,focus=None, outdir=None, filt=None, scale='',figsize=[, ], stats=False, stat='',err='', subset=''):", "body": "if focus is None:focus = self.focus_stageif outdir is None:outdir = self.report_dir + '' + focusif not os.path.isdir(outdir):os.mkdir(outdir)if subset is not None:samples = self._get_samples(subset)elif samples is None:samples = self.subsets['']elif isinstance(samples, str):samples = [samples]with self.pbar.set(total=len(samples), desc='') as prog:for s in samples:f, a = self.data[s].tplot(analytes=analytes, figsize=figsize,scale=scale, filt=filt,ranges=ranges, stats=stats,stat=stat, err=err, focus_stage=focus)f.savefig(outdir + '' + s + '')plt.close(f)prog.update()return", "docstring": "Plot analytes as a function of time.\n\nParameters\n----------\nanalytes : optional, array_like or str\n The analyte(s) to plot. Defaults to all analytes.\nsamples: optional, array_like or str\n The sample(s) to plot. Defaults to all samples.\nranges : bool\n Whether or not to show the signal/background regions\n identified by 'autorange'.\nfocus : str\n The focus 'stage' of the analysis to plot. Can be\n 'rawdata', 'despiked', 'signal', 'background',\n 'bkgsub', 'ratios' or 'calibrated'.\noutdir : str\n Path to a directory where you'd like the plots to be\n saved. Defaults to 'reports/[focus]' in your data directory.\nfilt : str, dict or bool\n Either logical filter expression contained in a str,\n a dict of expressions specifying the filter string to\n use for each analyte or a boolean. 
Passed to `grab_filt`.\nscale : str\n If 'log', plots the data on a log scale.\nfigsize : array_like\n Array of length 2 specifying figure [width, height] in\n inches.\nstats : bool\n Whether or not to overlay the mean and standard deviations\n for each trace.\nstat, err: str\n The names of the statistic and error components to plot.\n Defaults to 'nanmean' and 'nanstd'.\n\n\nReturns\n-------\nNone", "id": "f2444:c0:m50"} {"signature": "def filter_nremoved(self, filt=True, quiet=False):", "body": "rminfo = {}for n in self.subsets['']:s = self.data[n]rminfo[n] = s.filt_nremoved(filt)if not quiet:maxL = max([len(s) for s in rminfo.keys()])print(''.format(string='', number=maxL + ) +''.format(total='') +''.format(removed='') +''.format(percent=''))for k, (ntot, nfilt, pcrm) in rminfo.items():print(''.format(string=k, number=maxL + ) +''.format(total=ntot) +''.format(removed=nfilt) +''.format(percent=pcrm))return rminfo", "docstring": "Report how many data are removed by the active filters.", "id": "f2444:c0:m37"} {"signature": "@_logdef crossplot(self, analytes=None, lognorm=True,bins=, filt=False, samples=None,subset=None, figsize=(, ), save=False,colourful=True, mode='', **kwargs):", "body": "if analytes is None:analytes = self.analytesif self.focus_stage in ['', '']:analytes = [a for a in analytes if self.internal_standard not in a]try:analytes = sorted(analytes, key=lambda x: float(re.findall('', x)[]))except IndexError:analytes = sorted(analytes)self.get_focus(filt=filt, samples=samples, subset=subset)fig, axes = plot.crossplot(dat=self.focus, keys=analytes, lognorm=lognorm,bins=bins, figsize=figsize, colourful=colourful,focus_stage=self.focus_stage, cmap=self.cmaps,denominator=self.internal_standard, mode=mode)if save or isinstance(save, str):if isinstance(save, str):fig.savefig(self.report_dir + '' + save, dpi=) else:fig.savefig(self.report_dir + '', dpi=)return fig, axes", "docstring": "Plot analytes against each other.\n\nParameters\n----------\nanalytes : optional, array_like or str\n The analyte(s) to plot. Defaults to all analytes.\nlognorm : bool\n Whether or not to log normalise the colour scale\n of the 2D histogram.\nbins : int\n The number of bins in the 2D histogram.\nfilt : str, dict or bool\n Either logical filter expression contained in a str,\n a dict of expressions specifying the filter string to\n use for each analyte or a boolean. 
Passed to `grab_filt`.\nfigsize : tuple\n Figure size (width, height) in inches.\nsave : bool or str\n If True, plot is saves as 'crossplot.png', if str plot is\n saves as str.\ncolourful : bool\n Whether or not the plot should be colourful :).\nmode : str\n 'hist2d' (default) or 'scatter'\n\nReturns\n-------\n(fig, axes)", "id": "f2444:c0:m45"} {"signature": "def find_expcoef(self, nsd_below=, plot=False,trimlim=None, autorange_kwargs={}):", "body": "print('')def findtrim(tr, lim=None):trr = np.roll(tr, -)trr[-] = if lim is None:lim = * np.nanmax(tr - trr)ind = (tr - trr) >= limreturn np.arange(len(ind))[ind ^ np.roll(ind, -)][]if not hasattr(self.stds[], ''):for s in self.stds:s.autorange(**autorange_kwargs, ploterrs=False)trans = []times = []for v in self.stds:for trnrng in v.trnrng[-::-]:tr = minmax_scale(v.data[''][(v.Time > trnrng[]) & (v.Time < trnrng[])])sm = np.apply_along_axis(np.nanmean, ,rolling_window(tr, , pad=))sm[] = sm[]trim = findtrim(sm, trimlim) + trans.append(minmax_scale(tr[trim:]))times.append(np.arange(tr[trim:].size) *np.diff(v.Time[:]))times = np.concatenate(times)times = np.round(times, )trans = np.concatenate(trans)ti = []tr = []for t in np.unique(times):ti.append(t)tr.append(np.nanmin(trans[times == t]))def expfit(x, e):\"\"\"\"\"\"return np.exp(e * x)ep, ecov = curve_fit(expfit, ti, tr, p0=(-))eeR2 = R2calc(trans, expfit(times, ep))if plot:fig, ax = plt.subplots(, , figsize=[, ])ax.scatter(times, trans, alpha=, color='', marker='', zorder=-)ax.scatter(ti, tr, alpha=, color='', marker='')fitx = np.linspace(, max(ti))ax.plot(fitx, expfit(fitx, ep), color='', label='')ax.plot(fitx, expfit(fitx, ep - nsd_below * np.diag(ecov)**, ),color='', label='')ax.text(, ,('''') % (ep,np.diag(ecov)**,eeR2,ep - nsd_below * np.diag(ecov)**),transform=ax.transAxes, ha='', va='', size=)ax.set_xlim(, ax.get_xlim()[-])ax.set_xlabel('')ax.set_ylim(-, )ax.set_ylabel('')plt.legend()if isinstance(plot, str):fig.savefig(plot)self.expdecay_coef = ep - nsd_below * np.diag(ecov)**print(''.format(self.expdecay_coef[]))return", "docstring": "Determines exponential decay coefficient for despike filter.\n\nFits an exponential decay function to the washout phase of standards\nto determine the washout time of your laser cell. The exponential\ncoefficient reported is `nsd_below` standard deviations below the\nfitted exponent, to ensure that no real data is removed.\n\nTotal counts are used in fitting, rather than a specific analyte.\n\nParameters\n----------\nnsd_below : float\n The number of standard deviations to subtract from the fitted\n coefficient when calculating the filter exponent.\nplot : bool or str\n If True, creates a plot of the fit, if str the plot is to the\n location specified in str.\ntrimlim : float\n A threshold limit used in determining the start of the\n exponential decay region of the washout. Defaults to half\n the increase in signal over background. If the data in\n the plot don't fall on an exponential decay line, change\n this number. 
Normally you'll need to increase it.\n\nReturns\n-------\nNone", "id": "f2444:c0:m5"} {"signature": "@_logdef gradient_plots(self, analytes=None, win=, samples=None, ranges=False,focus=None, outdir=None,figsize=[, ], subset=''):", "body": "if focus is None:focus = self.focus_stageif outdir is None:outdir = self.report_dir + '' + focus + ''if not os.path.isdir(outdir):os.mkdir(outdir)if subset is not None:samples = self._get_samples(subset)elif samples is None:samples = self.subsets['']elif isinstance(samples, str):samples = [samples]with self.pbar.set(total=len(samples), desc='') as prog:for s in samples:f, a = self.data[s].gplot(analytes=analytes, win=win, figsize=figsize,ranges=ranges, focus_stage=focus)f.savefig(outdir + '' + s + '')plt.close(f)prog.update()return", "docstring": "Plot analyte gradients as a function of time.\n\nParameters\n----------\nanalytes : optional, array_like or str\n The analyte(s) to plot. Defaults to all analytes.\nsamples: optional, array_like or str\n The sample(s) to plot. Defaults to all samples.\nranges : bool\n Whether or not to show the signal/background regions\n identified by 'autorange'.\nfocus : str\n The focus 'stage' of the analysis to plot. Can be\n 'rawdata', 'despiked', 'signal', 'background',\n 'bkgsub', 'ratios' or 'calibrated'.\noutdir : str\n Path to a directory where you'd like the plots to be\n saved. Defaults to 'reports/[focus]' in your data directory.\nfilt : str, dict or bool\n Either logical filter expression contained in a str,\n a dict of expressions specifying the filter string to\n use for each analyte or a boolean. Passed to `grab_filt`.\nscale : str\n If 'log', plots the data on a log scale.\nfigsize : array_like\n Array of length 2 specifying figure [width, height] in\n inches.\nstats : bool\n Whether or not to overlay the mean and standard deviations\n for each trace.\nstat, err: str\n The names of the statistic and error components to plot.\n Defaults to 'nanmean' and 'nanstd'.\n\n\nReturns\n-------\nNone", "id": "f2444:c0:m51"} {"signature": "def crossplot_filters(self, filter_string, analytes=None,samples=None, subset=None, filt=None):", "body": "if analytes is None:analytes = [a for a in self.analytes if self.internal_standard not in a]if samples is None:samples = self._get_samples(subset)filts = self.data[samples[]].filt.components.keys()cfilts = [f for f in filts if filter_string in f]flab = re.compile('') self.get_focus(subset=subset, filt=filt)numvars = len(analytes)fig, axes = plt.subplots(nrows=numvars, ncols=numvars,figsize=(, ))fig.subplots_adjust(hspace=, wspace=)for ax in axes.flat:ax.xaxis.set_visible(False)ax.yaxis.set_visible(False)if ax.is_first_col():ax.yaxis.set_ticks_position('')if ax.is_last_col():ax.yaxis.set_ticks_position('')if ax.is_first_row():ax.xaxis.set_ticks_position('')if ax.is_last_row():ax.xaxis.set_ticks_position('')cmlist = ['', '', '', '','', '', '', '','', '', '', '','', '', '', '', '', '']focus = {k: nominal_values(v) for k, v in self.focus.items()}udict = {a: unitpicker(np.nanmean(focus[a]),focus_stage=self.focus_stage,denominator=self.internal_standard) for a in analytes}rdict = {a: (np.nanmin(focus[a] * udict[a][]),np.nanmax(focus[a] * udict[a][])) for a in analytes}for f in cfilts:self.get_focus(f, subset=subset)focus = {k: nominal_values(v) for k, v in self.focus.items()}lab = flab.match(f).groups()[]axes[, ].scatter([], [], s=, label=lab)for i, j in zip(*np.triu_indices_from(axes, k=)):ai = analytes[i]aj = analytes[j]pi = focus[ai][~np.isnan(focus[ai])] * udict[ai][]pj = 
focus[aj][~np.isnan(focus[aj])] * udict[aj][]axes[i, j].scatter(pj, pi, alpha=, s=, lw=, edgecolor='')axes[j, i].scatter(pi, pj, alpha=, s=, lw=, edgecolor='')axes[i, j].set_ylim(*rdict[ai])axes[i, j].set_xlim(*rdict[aj])axes[j, i].set_ylim(*rdict[aj])axes[j, i].set_xlim(*rdict[ai])for a, n in zip(analytes, np.arange(len(analytes))):axes[n, n].annotate(a + '' + udict[a][], (, ),xycoords='',ha='', va='')axes[n, n].set_xlim(*rdict[a])axes[n, n].set_ylim(*rdict[a])axes[, ].legend(loc='', title=filter_string)for i, j in zip(range(numvars), itertools.cycle((-, ))):axes[j, i].xaxis.set_visible(True)for label in axes[j, i].get_xticklabels():label.set_rotation()axes[i, j].yaxis.set_visible(True)return fig, axes", "docstring": "Plot the results of a group of filters in a crossplot.\n\nParameters\n----------\nfilter_string : str\n A string that identifies a group of filters.\n e.g. 'test' would plot all filters with 'test' in the\n name.\nanalytes : optional, array_like or str\n The analyte(s) to plot. Defaults to all analytes.\n\nReturns\n-------\nfig, axes objects", "id": "f2444:c0:m49"} {"signature": "@_logdef make_subset(self, samples=None, name=None):", "body": "for k, v in self.subsets.items():if set(v) == set(samples) and k != '':return kif isinstance(samples, str):samples = [samples]not_exists = [s for s in samples if s not in self.subsets['']]if len(not_exists) > :raise ValueError(''.join(not_exists) + '')if name is None:name = max([-] + [x for x in self.subsets.keys() if isinstance(x, int)]) + self._subset_names.append(name)if samples is not None:self.subsets[name] = samplesfor s in samples:try:self.subsets[''].remove(s)except ValueError:passself._has_subsets = Truereturn name", "docstring": "Creates a subset of samples, which can be treated independently.\n\nParameters\n----------\nsamples : str or array - like\n Name of sample, or list of sample names.\nname : (optional) str or number\n The name of the sample group. 
Defaults to n + 1, where n is\n the highest existing group number", "id": "f2444:c0:m19"} {"signature": "@_logdef filter_gradient_threshold_percentile(self, analyte, percentiles, level='', win=, filt=False,samples=None, subset=None):", "body": "params = locals()del(params[''])if samples is not None:subset = self.make_subset(samples)samples = self._get_samples(subset)self.minimal_analytes.update([analyte])self.get_gradients(analytes=[analyte], win=win, filt=filt, subset=subset)grad = self.gradients[analyte][~np.isnan(self.gradients[analyte])]if isinstance(percentiles, (int, float)):percentiles = [percentiles]if level == '':lims = np.percentile(grad, percentiles)with self.pbar.set(total=len(samples), desc='') as prog:for s in samples:d = self.data[s]setn = d.filt.maxset + g = calc_grads(d.Time, d.focus, [analyte], win)[analyte]if level == '':gt = nominal_values(g)lims = np.percentile(gt[~np.isnan(gt)], percentiles)if len(lims) == :above = g >= lims[]below = g < lims[]d.filt.add(analyte + ''.format(percentiles[]),below,''.format(percentiles[], analyte, lims[]),params, setn=setn)d.filt.add(analyte + ''.format(percentiles[]),above,''.format(percentiles[], analyte, lims[]),params, setn=setn)elif len(lims) == :inside = (g >= min(lims)) & (g <= max(lims))outside = (g < min(lims)) | (g > max(lims))lpc = ''.join([''.format(p) for p in percentiles])d.filt.add(analyte + '' + lpc + '',inside,'' + lpc + '' + analyte + '',params, setn=setn)d.filt.add(analyte + '' + lpc + '',outside,'' + lpc + '' + analyte + '',params, setn=setn)prog.update()return", "docstring": "Calculate a gradient threshold filter to the data.\n\nGenerates two filters above and below the threshold value for a\ngiven analyte.\n\nParameters\n----------\nanalyte : str\n The analyte that the filter applies to.\nwin : int\n The window over which to calculate the moving gradient\npercentiles : float or iterable of len=2\n The percentile values.\nfilt : bool\n Whether or not to apply existing filters to the data before\n calculating this filter.\nsamples : array_like or None\n Which samples to apply this filter to. 
If None, applies to all\n samples.\nsubset : str or number\n The subset of samples (defined by make_subset) you want to apply\n the filter to.\n\nReturns\n-------\nNone", "id": "f2444:c0:m24"} {"signature": "@_logdef zeroscreen(self, focus_stage=None):", "body": "if focus_stage is None:focus_stage = self.focus_stagefor s in self.data.values():ind = np.ones(len(s.Time), dtype=bool)for v in s.data[focus_stage].values():ind = ind & (nominal_values(v) > )for k in s.data[focus_stage].keys():s.data[focus_stage][k][~ind] = unc.ufloat(np.nan, np.nan)self.set_focus(focus_stage)return", "docstring": "Remove all points containing data below zero (which are impossible!)", "id": "f2444:c0:m20"} {"signature": "def get_background(self, n_min=, n_max=None, focus_stage='', bkg_filter=False, f_win=, f_n_lim=):", "body": "allbkgs = {'': [],'': []}if focus_stage == '':if '' not in self.stages_complete:focus_stage = ''for a in self.analytes:allbkgs[a] = []n0 = for s in self.data.values():if sum(s.bkg) > :allbkgs[''].append(s.uTime[s.bkg])allbkgs[''].append(enumerate_bool(s.bkg, n0)[s.bkg])n0 = allbkgs[''][-][-]for a in self.analytes:allbkgs[a].append(s.data[focus_stage][a][s.bkg])allbkgs.update((k, np.concatenate(v)) for k, v in allbkgs.items())bkgs = pd.DataFrame(allbkgs) self.bkg = Bunch()if n_max is None:self.bkg[''] = bkgs.groupby('').filter(lambda x: len(x) > n_min)else:self.bkg[''] = bkgs.groupby('').filter(lambda x: (len(x) > n_min) & (len(x) < n_max))self.bkg[''] = self.bkg[''].groupby('').aggregate([np.mean, np.std, stderr])self.bkg[''].sort_values(('', ''), inplace=True)if bkg_filter:t = self.bkg[''].loc[:, idx[:, '']]r = t.rolling(f_win).aggregate([np.nanmean, np.nanstd])upper = r.loc[:, idx[:, :, '']] + f_n_lim * r.loc[:, idx[:, :, '']].valuesover = r.loc[:, idx[:, :, '']] > np.roll(upper.values, , )ns_drop = over.loc[over.apply(any, ), :].index.valuesself.bkg[''].drop(ns_drop, inplace=True)ind = np.ones(self.bkg[''].shape[], dtype=bool)for ns in ns_drop:ind = ind & (self.bkg[''].loc[:, ''] != ns)self.bkg[''] = self.bkg[''].loc[ind, :]return", "docstring": "Extract all background data from all samples on universal time scale.\nUsed by both 'polynomial' and 'weightedmean' methods.\n\nParameters\n----------\nn_min : int\n The minimum number of points a background region must\n have to be included in calculation.\nn_max : int\n The maximum number of points a background region must\n have to be included in calculation.\nfilter : bool\n If true, apply a rolling filter to the isolated background regions\n to exclude regions with anomalously high values. If True, two parameters\n alter the filter's behaviour:\nf_win : int\n The size of the rolling window\nf_n_lim : float\n The number of standard deviations above the rolling mean\n to set the threshold.\nfocus_stage : str\n Which stage of analysis to apply processing to. \n Defaults to 'despiked' if present, or 'rawdata' if not. 
\n Can be one of:\n * 'rawdata': raw data, loaded from csv file.\n * 'despiked': despiked data.\n * 'signal'/'background': isolated signal and background data.\n Created by self.separate, after signal and background\n regions have been identified by self.autorange.\n * 'bkgsub': background subtracted data, created by \n self.bkg_correct\n * 'ratios': element ratio data, created by self.ratio.\n * 'calibrated': ratio data calibrated to standards, created by self.calibrate.\n\nReturns\n-------\npandas.DataFrame object containing background data.", "id": "f2444:c0:m7"} {"signature": "@_logdef fit_classifier(self, name, analytes, method, samples=None,subset=None, filt=True, sort_by=, **kwargs):", "body": "if samples is not None:subset = self.make_subset(samples)self.get_focus(subset=subset, filt=filt)c = classifier(analytes,sort_by)c.fit(data=self.focus,method=method,**kwargs)self.classifiers[name] = creturn name", "docstring": "Create a clustering classifier based on all samples, or a subset.\n\nParameters\n----------\nname : str\n The name of the classifier.\nanalytes : str or iterable\n Which analytes the clustering algorithm should consider.\nmethod : str\n Which clustering algorithm to use. Can be:\n\n 'meanshift'\n The `sklearn.cluster.MeanShift` algorithm.\n Automatically determines number of clusters\n in data based on the `bandwidth` of expected\n variation.\n 'kmeans'\n The `sklearn.cluster.KMeans` algorithm. Determines\n the characteristics of a known number of clusters\n within the data. Must provide `n_clusters` to specify\n the expected number of clusters.\nsamples : iterable\n list of samples to consider. Overrides 'subset'.\nsubset : str\n The subset of samples used to fit the classifier. Ignored if\n 'samples' is specified.\nsort_by : int\n Which analyte the resulting clusters should be sorted\n by - defaults to 0, which is the first analyte.\n**kwargs :\n method-specific keyword parameters - see below.\nMeanshift Parameters\n bandwidth : str or float\n The bandwidth (float) or bandwidth method ('scott' or 'silverman')\n used to estimate the data bandwidth.\n bin_seeding : bool\n Modifies the behaviour of the meanshift algorithm. Refer to\n sklearn.cluster.meanshift documentation.\nK - Means Parameters\n n_clusters : int\n The number of clusters expected in the data.\n\nReturns\n-------\nname : str", "id": "f2444:c0:m26"} {"signature": "@_logdef bkg_subtract(self, analytes=None, errtype='', focus_stage=''):", "body": "if analytes is None:analytes = self.analyteselif isinstance(analytes, str):analytes = [analytes]if focus_stage == '':if '' not in self.stages_complete:focus_stage = ''bkg_interps = {}for a in analytes:bkg_interps[a] = un_interp1d(x=self.bkg[''][''],y=un.uarray(self.bkg[''][a][''],self.bkg[''][a][errtype]))self.bkg_interps = bkg_interpswith self.pbar.set(total=len(self.data), desc='') as prog:for d in self.data.values():[d.bkg_subtract(a, bkg_interps[a].new(d.uTime), ~d.sig, focus_stage=focus_stage) for a in analytes]d.setfocus('')prog.update()self.stages_complete.update([''])self.focus_stage = ''return", "docstring": "Subtract calculated background from data.\n\nMust run bkg_calc first!\n\nParameters\n----------\nanalytes : str or iterable\n Which analyte(s) to subtract.\nerrtype : str\n Which type of error to propagate. default is 'stderr'.\nfocus_stage : str\n Which stage of analysis to apply processing to. \n Defaults to 'despiked' if present, or 'rawdata' if not. 
\n Can be one of:\n * 'rawdata': raw data, loaded from csv file.\n * 'despiked': despiked data.\n * 'signal'/'background': isolated signal and background data.\n Created by self.separate, after signal and background\n regions have been identified by self.autorange.\n * 'bkgsub': background subtracted data, created by \n self.bkg_correct\n * 'ratios': element ratio data, created by self.ratio.\n * 'calibrated': ratio data calibrated to standards, created by self.calibrate.", "id": "f2444:c0:m10"} {"signature": "@_logdef correlation_plots(self, x_analyte, y_analyte, window=, filt=True, recalc=False, samples=None, subset=None, outdir=None):", "body": "if outdir is None:outdir = self.report_dir + ''if not os.path.isdir(outdir):os.mkdir(outdir)if subset is not None:samples = self._get_samples(subset)elif samples is None:samples = self.subsets['']elif isinstance(samples, str):samples = [samples]with self.pbar.set(total=len(samples), desc='') as prog:for s in samples:f, a = self.data[s].correlation_plot(x_analyte=x_analyte, y_analyte=y_analyte,window=window, filt=filt, recalc=recalc)f.savefig(''.format(outdir, s, x_analyte, y_analyte))plt.close(f)prog.update()return", "docstring": "Plot the local correlation between two analytes.\n\nParameters\n----------\nx_analyte, y_analyte : str\n The names of the x and y analytes to correlate.\nwindow : int, None\n The rolling window used when calculating the correlation.\nfilt : bool\n Whether or not to apply existing filters to the data before\n calculating this filter.\nrecalc : bool\n If True, the correlation is re-calculated, even if it is already present.\n\nReturns\n-------\nNone", "id": "f2444:c0:m29"} {"signature": "@_logdef filter_defragment(self, threshold, mode='', filt=True, samples=None, subset=None):", "body": "if samples is not None:subset = self.make_subset(samples)samples = self._get_samples(subset)for s in samples:f = self.data[s].filt.grab_filt(filt)self.data[s].filt.add(name=''.format(mode, threshold),filt=filters.defrag(f, threshold, mode),info=''.format(mode, threshold),params=(threshold, mode, filt, samples, subset))", "docstring": "Remove 'fragments' from the calculated filter\n\nParameters\n----------\nthreshold : int\n Contiguous data regions that contain this number\n or fewer points are considered 'fragments'\nmode : str\n Specifies whether to 'include' or 'exclude' the identified\n fragments.\nfilt : bool or filt string\n Which filter to apply the defragmenter to. Defaults to True\nsamples : array_like or None\n Which samples to apply this filter to. 
If None, applies to all\n samples.\nsubset : str or number\n The subset of samples (defined by make_subset) you want to apply\n the filter to.\n\nReturns\n-------\nNone", "id": "f2444:c0:m34"} {"signature": "def long_file(data_file, dataformat, sample_list, savedir=None, srm_id=None, **autorange_args):", "body": "if isinstance(sample_list, str):if os.path.exists(sample_list):sample_list = np.genfromtxt(sample_list, dtype=str)else:raise ValueError('')elif not isinstance(sample_list, (list, np.ndarray)):raise ValueError('')if srm_id is not None:srm_replace = []for s in sample_list:if srm_id in s:s = srm_idsrm_replace.append(s)sample_list = srm_replace_, _, dat, meta = read_data(data_file, dataformat=dataformat, name_mode='')if '' in meta:d = dateutil.parser.parse(meta[''])else:d = datetime.datetime.now()bkg, sig, trn, _ = autorange(dat[''], dat[''], **autorange_args)ns = np.zeros(sig.size)ns[sig] = np.cumsum((sig ^ np.roll(sig, )) & sig)[sig]n = int(max(ns))if len(sample_list) != n:warn('' + '')bounds = []lower = sn = next_sample = ''for ni in range(n-):sample = sample_list[sn]next_sample = sample_list[sn + ]if sample != next_sample:current_end = np.argwhere(dat[''] == dat[''][ns == ni + ].max())[]next_start = np.argwhere(dat[''] == dat[''][ns == ni + ].min())[]upper = (current_end + next_start) // bounds.append((sample, (int(lower), int(upper))))lower = upper + sn += bounds.append((sample_list[-], (int(upper) + , len(ns))))sections = {}seen = {}for s, (lo, hi) in bounds:if s not in seen:seen[s] = else:seen[s] += s += ''.format(seen[s])sections[s] = {'': dat[''][lo:hi]}sections[s][''] = sections[s][''] - np.nanmin(sections[s][''])sections[s][''] = {}for k, v in dat[''].items():sections[s][''][k] = v[lo:hi]sections[s][''] = d + datetime.timedelta(seconds=np.nanmin(sections[s]['']))if savedir is None:savedir = os.path.join(os.path.dirname(os.path.abspath(data_file)), os.path.splitext(os.path.basename(data_file))[] + '')if not os.path.isdir(savedir):os.makedirs(savedir)header = [''.format(datetime.datetime.now().strftime(''))]if '' not in meta:header.append('')else:header.append('')header.append('')header.append('')flist = [savedir]for s, dat in sections.items():iheader = header.copy()iheader.append(''.format(s))iheader.append(''.format(dat[''].strftime('')))iheader = ''.join(iheader) + ''out = pd.DataFrame({analyte_2_namemass(k): v for k, v in dat[''].items()}, index=dat[''])out.index.name = ''csv = out.to_csv()with open(''.format(savedir, s), '') as f:f.write(iheader)f.write(csv)flist.append(''.format(s))print(\"\".format(n, ''.join(flist)))return None", "docstring": "TODO: Check for existing files in savedir, don't overwrite?", "id": "f2446:m1"} {"signature": "def residual_plots(df, rep_stats=None, els=['', '', '', '', '', '', '', '']):", "body": "As = []Rs = []analytes = [c for c in df.columns if ('' not in c)]ratios = [c for c in df.columns if ('' in c)]for e in els:if e == '':As.append('')elif e == '':As.append('')else:As.append([a for a in analytes if e in a][])Rs.append([r for r in ratios if e in r][])fig, axs = plt.subplots(len(els), , figsize=(, len(els) * ))for i, (e, a) in enumerate(zip(Rs, As)):lax, hax = axs[i]x = df.loc[:, e].valuesyl = df.loc[:, a].valuesc = element_colour(fmt_el(a))u = ''rl = yl - xlax.scatter(x, rl, c=c, s=, lw=, edgecolor='', alpha=)rl = rl[~np.isnan(rl)]lims = np.percentile(rl, [, ])lims += lims.ptp() * np.array((-, ))bins = np.linspace(*lims, )kdl = stats.gaussian_kde(rl, )hax.fill_betweenx(bins, kdl(bins), facecolor=c, alpha=, edgecolor='', lw=, 
label='')hax.set_xlim([, hax.get_xlim()[-]])lax.set_ylabel(e + ''+ u + '')lax.text(,,fmt_RSS(rl), fontsize=,ha='', va='', transform=lax.transAxes)xlim = np.percentile(x[~np.isnan(x)], [, ])lax.set_xlim(xlim)for ax in axs[i]:ax.set_ylim(lims)ax.axhline(, c='', ls='', alpha=)if rep_stats is not None:ax.axhspan(-rep_stats[e][] * , rep_stats[e][] * , color=(,,,), zorder=-)if not ax.is_first_col():ax.set_yticklabels([])if ax.is_last_row():hax.set_xlabel('')lax.set_xlabel('')if ax.is_first_row():lax.set_title('', loc='')fig.tight_layout()return fig, axs", "docstring": "Function for plotting Test User and LAtools data comparison.\n\nParameters\n----------\ndf : pandas.DataFrame\n A dataframe containing reference ('X/Ca_r'), test user \n ('X/Ca_t') and LAtools ('X123') data.\nrep_stats : dict\n Reproducibility stats of the reference data produced by\n `pairwise_reproducibility`\nels : list\n list of elements (names only) to plot.", "id": "f2447:m2"} {"signature": "def summary_stats(x, y, nm=None):", "body": "if isinstance(nm, str):nm = [nm]", "docstring": "Compute summary statistics for paired x, y data.\n\nTests\n-----\n\nParameters\n----------\nx, y : array-like\n Data to compare\nnm : str (optional)\n Index value of created dataframe.\n\nReturns\n-------\npandas dataframe of statistics.", "id": "f2450:m3"} {"signature": "def comparison_plots(df, els=['', '', '', '', '']):", "body": "As = []Rs = []analytes = [c for c in df.columns if ('' not in c) and ('' not in c)]ratios = [c for c in df.columns if ('' in c)]for e in els:if e == '':As.append('')elif e == '':As.append('')else:As.append([a for a in analytes if e in a][])Rs.append([r for r in ratios if e in r][][:-])fig, axs = plt.subplots(len(els), , figsize=(, len(els) * ))for i, (e, a) in enumerate(zip(Rs, As)):if a == '':m = u = ''else:m = u = ''c = element_colour(a)tax, lax, hax = axs[i]x = df.loc[:, e + ''].values * myt = df.loc[:, e + ''].values * myl = df.loc[:, a].values * mrt = yt - xrl = yl - xtax.scatter(x, yt, c=c, s=, lw=, edgecolor='', alpha=)lax.scatter(x, yl, c=c, s=, lw=, edgecolor='', alpha=)rt = rt[~np.isnan(rt)]rl = rl[~np.isnan(rl)]lims = np.percentile(np.hstack([rt, rl]), [, ])lims += lims.ptp() * np.array((-, ))bins = np.linspace(*lims, )kdt = stats.gaussian_kde(rt, )kdl = stats.gaussian_kde(rl, )hax.fill_between(bins, kdl(bins), facecolor=c, alpha=, edgecolor='', lw=, label='')hax.fill_between(bins, kdt(bins), facecolor=c, alpha=, edgecolor='', lw=, label='')hax.set_ylim([, hax.get_ylim()[-]])hax.set_xlim(lims)hax.axvline(, c='', ls='', alpha=)hax.set_ylabel('')tax.set_ylabel(e + ''+ u + '')tax.text(,,fmt_RSS(rt), fontsize=,ha='', va='', transform=tax.transAxes)lax.text(,,fmt_RSS(rl), fontsize=,ha='', va='', transform=lax.transAxes)xlim = np.percentile(x[~np.isnan(x)], [, ])for ax in [tax, lax]:ax.set_xlim(xlim)ax.set_ylim(xlim)ax.plot(xlim, xlim, c='', ls='', alpha=)for ax in axs[i]:if ax.is_last_row():hax.set_xlabel('')tax.set_xlabel('')lax.set_xlabel('')hax.legend(fontsize=)if ax.is_first_row():tax.set_title('', loc='')lax.set_title('', loc='')fig.tight_layout()return fig, axs", "docstring": "Function for plotting Test User and LAtools data comparison.\n\nParameters\n----------\ndf : pandas.DataFrame\n A dataframe containing reference ('X/Ca_r'), test user \n ('X/Ca_t') and LAtools ('X123') data.\nels : list\n list of elements (names only) to plot.", "id": "f2452:m4"} {"signature": "def rangecalcx(x, pad=):", "body": "mn = np.nanmin(x)mx = np.nanmax(x)rn = mx - mnreturn (mn - pad * rn, mx + pad * rn)", "docstring": 
"Calculate padded range limits for axes.", "id": "f2452:m2"} {"signature": "def residual_plots(df, rep_stats=None, els=['', '', '', '', '']):", "body": "As = []Rs = []analytes = [c for c in df.columns if ('' not in c) and ('' not in c)]ratios = [c for c in df.columns if ('' in c)]for e in els:if e == '':As.append('')elif e == '':As.append('')else:As.append([a for a in analytes if e in a][])Rs.append([r for r in ratios if e in r][][:-])fig, axs = plt.subplots(len(els), , figsize=(, len(els) * ))for i, (e, a) in enumerate(zip(Rs, As)):if a == '':m = u = ''else:m = u = ''tax, lax, hax = axs[i]x = df.loc[:, e + ''].values * myt = df.loc[:, e + ''].values * myl = df.loc[:, a].values * mrt = yt - xrl = yl - xtax.scatter(x, rt, c=element_colour(a), s=, lw=, edgecolor='', alpha=)lax.scatter(x, rl, c=element_colour(a), s=, lw=, edgecolor='', alpha=)rt = rt[~np.isnan(rt)]rl = rl[~np.isnan(rl)]lims = np.percentile(np.hstack([rt, rl]), [, ])lims += lims.ptp() * np.array((-, ))bins = np.linspace(*lims, )kdt = stats.gaussian_kde(rt, )kdl = stats.gaussian_kde(rl, )hax.fill_betweenx(bins, kdl(bins), facecolor=element_colour(a), alpha=, edgecolor='', lw=, label='')hax.fill_betweenx(bins, kdt(bins), facecolor=element_colour(a), alpha=, edgecolor='', lw=, label='')hax.set_xlim([, hax.get_xlim()[-]])tax.set_ylabel(e + ''+ u + '')tax.text(,,fmt_RSS(rt), fontsize=,ha='', va='', transform=tax.transAxes)lax.text(,,fmt_RSS(rl), fontsize=,ha='', va='', transform=lax.transAxes)xlim = np.percentile(x[~np.isnan(x)], [, ])for ax in [tax, lax]:ax.set_xlim(xlim)for ax in axs[i]:ax.set_ylim(lims)ax.axhline(, c='', ls='', alpha=)if rep_stats is not None:ax.axhspan(-rep_stats[e][] * , rep_stats[e][] * , color=(,,,), zorder=-)if not ax.is_first_col():ax.set_yticklabels([])if ax.is_last_row():hax.set_xlabel('')tax.set_xlabel('')lax.set_xlabel('')if ax.is_first_row():tax.set_title('', loc='')lax.set_title('', loc='')fig.tight_layout()return fig, axs", "docstring": "Function for plotting Test User and LAtools data comparison.\n\nParameters\n----------\ndf : pandas.DataFrame\n A dataframe containing reference ('X/Ca_r'), test user \n ('X/Ca_t') and LAtools ('X123') data.\nrep_stats : dict\n Reproducibility stats of the reference data produced by\n `pairwise_reproducibility`\nels : list\n list of elements (names only) to plot.", "id": "f2452:m5"} {"signature": "def read_codeml_output(filename,df,altall_cutoff=,):", "body": "with open(filename, '') as f:data = f.read()regex = re.compile('')trees = regex.findall(data)anc_tree = trees[]tip_tree = dendropy.Tree.get(data=trees[], schema='')anc_tree = dendropy.Tree.get(data=trees[], schema='')tree = tip_treeancestors = anc_tree.internal_nodes()for i, node in enumerate(tree.internal_nodes()):node.label = ancestors[i].labeldf[''] = Nonefor node in tree.postorder_node_iter():if node.parent_node is None:passelif node.is_leaf():node_label = node.taxon.labelparent_label = node.parent_node.labeldf.loc[df.uid == node_label, ''] = node_labelparent_id = df.loc[df.uid == node_label, ''].values[]df.loc[df.id == parent_id, ''] = node.parent_node.labelelif node.is_internal():label = node.labelparent_id = df.loc[df.reconstruct_label == label, ''].values[]df.loc[df.id == parent_id, ''] = node.parent_node.labelnode_regex = re.compile(\"\"\"\"\"\")node_num_regex = re.compile(\"\")df[''] = Nonedf[''] = Nonedf[''] = Nonedf[''] = Nonefor node in node_regex.findall(data):node_label = node_num_regex.search(node).group()site_regex = re.compile(\"\")ml_sequence, ml_posterior, alt_sequence, alt_posterior = [], 
[], [], []for site in site_regex.findall(node):scores = [float(site[i+:i+]) for i in range(,len(site), )]residues = [site[i] for i in range(, len(site), )]sorted_score_index = [i[] for i in sorted(enumerate(scores),key=lambda x:x[],reverse=True)]ml_idx = sorted_score_index[]alt_idx = sorted_score_index[]ml_sequence.append(residues[ml_idx])ml_posterior.append(scores[ml_idx])if scores[alt_idx] < altall_cutoff:alt_idx = ml_idxalt_sequence.append(residues[alt_idx])alt_posterior.append(scores[alt_idx])keys = [\"\",\"\",\"\",\"\"]vals = [\"\".join(ml_sequence),sum(ml_posterior) / len(ml_posterior),\"\".join(alt_sequence),sum(alt_posterior) / len(alt_posterior),]df.loc[df.reconstruct_label == node_label, keys] = valsreturn df", "docstring": "Read codeml file.", "id": "f2459:m0"} {"signature": "@staticmethoddef status(s):", "body": "print(''.format(s))", "docstring": "Prints things in bold.", "id": "f2460:c0:m0"} {"signature": "def is_applicable(self, date_string, strip_timezone=False, settings=None):", "body": "if strip_timezone:date_string, _ = pop_tz_offset_from_string(date_string, as_offset=False)date_string = self._translate_numerals(date_string)if settings.NORMALIZE:date_string = normalize_unicode(date_string)date_string = self._simplify(date_string, settings=settings)dictionary = self._get_dictionary(settings)date_tokens = dictionary.split(date_string)return dictionary.are_tokens_valid(date_tokens)", "docstring": "Check if the locale is applicable to translate date string.\n\n:param date_string:\n A string representing date and/or time in a recognizably valid format.\n:type date_string: str|unicode\n\n:param strip_timezone:\n If True, timezone is stripped from date string.\n:type strip_timezone: bool\n\n:return: boolean value representing if the locale is applicable for the date string or not.", "id": "f2485:c0:m1"} {"signature": "def get_locale_map(self, languages=None, locales=None, region=None,use_given_order=False, allow_conflicting_locales=False):", "body": "return OrderedDict(self._load_data(languages=languages, locales=locales, region=region, use_given_order=use_given_order,allow_conflicting_locales=allow_conflicting_locales))", "docstring": "Get an ordered mapping with locale codes as keys\nand corresponding locale instances as values.\n\n:param languages:\n A list of language codes, e.g. ['en', 'es', 'zh-Hant'].\n If locales are not given, languages and region are\n used to construct locales to load.\n:type languages: list\n\n:param locales:\n A list of codes of locales which are to be loaded,\n e.g. ['fr-PF', 'qu-EC', 'af-NA']\n:type locales: list\n\n:param region:\n A region code, e.g. 
'IN', '001', 'NE'.\n If locales are not given, languages and region are\n used to construct locales to load.\n:type region: str|unicode\n\n:param use_given_order:\n If True, the returned mapping is ordered in the order locales are given.\n:type use_given_order: bool\n\n:param allow_conflicting_locales:\n if True, locales with same language and different region can be loaded.\n:type allow_conflicting_locales: bool\n\n:return: ordered locale code to locale instance mapping", "id": "f2488:c0:m0"} {"signature": "def search_dates(text, languages=None, settings=None, add_detected_language=False):", "body": "result = _search_with_detection.search_dates(text=text, languages=languages, settings=settings)language, dates = result.get(''), result.get('')if dates:if add_detected_language:dates = [date + (language, ) for date in dates]return dates", "docstring": "Find all substrings of the given string which represent date and/or time and parse them.\n\n :param text:\n A string in a natural language which may contain date and/or time expressions.\n :type text: str|unicode\n\n :param languages:\n A list of two-letter language codes, e.g. ['en', 'es']. If languages are given, it will\n not attempt to detect the language.\n :type languages: list\n\n :param settings:\n Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.\n :type settings: dict\n\n :param add_detected_language:\n Indicates if we want the detected language returned in the tuple.\n :type add_detected_language: bool\n\n :return: Returns list of tuples containing:\n substrings representing date and/or time, corresponding :mod:`datetime.datetime`\n object and detected language if *add_detected_language* is True.\n Returns None if no dates that can be parsed are found.\n :rtype: list\n :raises: ValueError - Unknown Language\n\n >>> from dateparser.search import search_dates\n >>> search_dates('The first artificial Earth satellite was launched on 4 October 1957.')\n [('on 4 October 1957', datetime.datetime(1957, 10, 4, 0, 0))]\n\n >>> search_dates('The first artificial Earth satellite was launched on 4 October 1957.', add_detected_language=True)\n [('on 4 October 1957', datetime.datetime(1957, 10, 4, 0, 0), 'en')]\n\n >>> search_dates(\"The client arrived to the office for the first time in March 3rd, 2004 and got serviced, after a couple of months, on May 6th 2004, the customer returned indicating a defect on the part\")\n [('in March 3rd, 2004 and', datetime.datetime(2004, 3, 3, 0, 0)),\n ('on May 6th 2004', datetime.datetime(2004, 5, 6, 0, 0))]", "id": "f2781:m0"} {"signature": "def _parse_time(self, date_string, settings):", "body": "date_string = PATTERN.sub('', date_string)date_string = re.sub(r'', '', date_string)try:return time_parser(date_string)except:pass", "docstring": "Attempts to parse the time part of date strings like '1 day ago, 2 PM", "id": "f2782:c0:m2"} {"signature": "def register_resources(self, **resources):", "body": "for key, resource in resources.items():if key in self._resources:raise AlreadyExistsException(''.format(key))self._init_resource(key, resource)", "docstring": "Register resources with the ResourceManager.", "id": "f2792:c1:m1"} {"signature": "def __call__(self, context):", "body": "yield None", "docstring": "Called by DataAccessContext when a resource is first\nrequested within a particular context. This generator\nshould yield one value that represents the resource\nfor `context`. It could open a new connection, or just\nreturn the current resource. 
After yielding, close\nthe resource if it is no longer needed.\n\nExample::\n\n connection = self.new_connection()\n try:\n yield connection\n except:\n connection.rollback()\n connection.close()\n else:\n # No errors\n connection.commit()\n connection.close()", "id": "f2792:c0:m1"} {"signature": "def replace_resource(self, key, resource):", "body": "return self._init_resource(key, resource)", "docstring": "Replace a Resource with another. Usually this is a bad\nidea but is often done in testing to replace a Resource\nwith a mock version. It's also used for practical\nreasons if you need to swap out resources for different\nframework implementations (ex: Greenlet version vs\nthreaded)\n\n:param key: Name of resource\n:param resource: Resource", "id": "f2792:c1:m2"} {"signature": "def __getattr__(self, name):", "body": "if self._state not in ('', '', ''):raise RuntimeError('')if name not in self._resources:if self._state not in ('', ''):raise RuntimeError('')resource = self.data_manager.get_resource(name)if resource:self._resource_generators[name] = resource(self)try:self._resources[name] = next(self._resource_generators[name])except StopIteration:raise ResourceSetupException(''.format(resource))else:raise AttributeError(''.format(name))return self._resources[name]", "docstring": "Gets a Resource from the DataManager and initializes\nit for the request.", "id": "f2794:c1:m6"} {"signature": "def _setup_hook(self):", "body": "pass", "docstring": "Handle anything that needs to happen before the start of the context.", "id": "f2794:c1:m9"} {"signature": "def __exit__(self, exc_type=None, exc_value=None, traceback=None):", "body": "assert self._state in ('', ''), ''self._state = ''if exc_type is not None and exc_value is None:exc_value = exc_type()generators, self._middleware_generators = self._middleware_generators, Nonewhile generators:__, generator = generators.pop()try:if self._exit(generator, exc_type, exc_value, traceback):exc_type, exc_value, traceback = (None, None, None)except:exc_type, exc_value, traceback = sys.exc_info()exc_value = exc_value or exc_type()try:self._teardown_hook(exc_value)except:self._resource_exit_errors.append(sys.exc_info())finally:resources, self._resource_generators = self._resource_generators, Nonewhile resources:__, resource_generator = resources.popitem()try:self._exit(resource_generator, exc_type, exc_value, traceback)except:self._resource_exit_errors.append(sys.exc_info())try:if exc_type:raise exc_type(exc_value).with_traceback(traceback)finally:try:self._final_hook(exc_value)finally:self.data_manager.ctx_stack.pop()self._state = ''", "docstring": "Close all open resources, middleware and\nremove context from stack\n\nIn-context exceptions (exceptions raised between ``__enter__``\nand ``__exit__``) are propagated to Middleware, Resources, and\neventually raised outside the ``DataAccessContext``. Resources\nare created in-context so if a Resource raises an exception\nduring setup, it is treated the same as all in-context exceptions.\n\nMiddleware may suppress or replace the in-context exception. If\nthere is an unhandled exception raised in-context or by middleware\nit is guaranteed to be raised outside the ``DataAccessContext``.\n\nIf a Resource raises an exception, it is collected and all\nother Resources will still close. Resource exit exceptions do not\npropagate. The Resource will only see the in-context/middleware\nexception. 
To access Resource exit exceptions, use\n``DataAccessContext.get_resource_exit_errors()``.", "id": "f2794:c1:m4"} {"signature": "def __getattr__(self, key):", "body": "return self.get(key)", "docstring": "Returns meta value for key. Returns ``None`` if the\nkey has not been set.", "id": "f2794:c0:m2"} {"signature": "def register_resources(self, **resources):", "body": "self._resource_manager.register_resources(**resources)", "docstring": "Register Resources with the ResourceManager.", "id": "f2797:c1:m3"} {"signature": "def replace_service(self, key, service):", "body": "return self._init_service(key, service)", "docstring": "Replace a Service with another. Usually this is a bad\nidea but is often done in testing to replace a Service\nwith a mock version. It's also used for practical\nreasons if you need to swap out services for different\nframework implementations (ex: Greenlet version vs\nthreaded)\n\n:param key: Name of service\n:param service: Service", "id": "f2797:c0:m2"} {"signature": "def register_context_middleware(self, *middleware):", "body": "for m in middleware:if not is_generator(m):raise Exception(''.format(m))self._middleware.extend(middleware)", "docstring": ":param middleware: Middleware in order of execution", "id": "f2797:c1:m1"} {"signature": "def encrypt(pubkey, password):", "body": "key = load_key(pubkey)encrypted_password = key.encrypt(password, PKCS1v15())return base64.b64encode(encrypted_password)", "docstring": "Encrypt password using given RSA public key and encode it with base64.\n\n The encrypted password can only be decrypted by someone with the\n private key (in this case, only Travis).", "id": "f2810:m1"} {"signature": "def update_travis_deploy_password(encrypted_password):", "body": "config = load_yaml_config(TRAVIS_CONFIG_FILE)config[''][''] = dict(secure=encrypted_password)save_yaml_config(TRAVIS_CONFIG_FILE, config)line = ('''')prepend_line(TRAVIS_CONFIG_FILE, line)", "docstring": "Put `encrypted_password` into the deploy section of .travis.yml.", "id": "f2810:m6"} {"signature": "def load_yaml_config(filepath):", "body": "with open(filepath) as f:return yaml.load(f)", "docstring": "Load yaml config file at the given path.", "id": "f2810:m4"} {"signature": "def read(self):", "body": "try:data = load(open(self.file), Loader)except (UnicodeDecodeError, YAMLError) as e:raise InvalidConfig(self.file, ''.format(e))try:validate(data, SCHEMA)except ValidationError as e:raise InvalidConfig(self.file, e)self.update(data)", "docstring": "Parse and validate the config file. 
The read data is accessible as a dictionary in this instance\n\n :return: None", "id": "f2811:c0:m1"} {"signature": "def get_file_group(file):", "body": "try:return getgrgid(os.stat(file).st_uid)[]except KeyError:return ''", "docstring": "Get file group id\n\n :param file: Path to file\n :return: group id\n :rtype: int", "id": "f2811:m1"} {"signature": "def __init__(self, file, ignore_perms=False, **kwargs):", "body": "super(Config, self).__init__(**kwargs)if not os.path.lexists(file):raise ConfigFileNotFoundError(file)if not ignore_perms and ((not os.getuid() and not only_root_write(file)) or oth_w_perm(file)):file = os.path.abspath(file)raise SecurityException(''''''.format(file=file, user=get_file_owner(file),group=get_file_group(file), perms=os.stat(file).st_mode & ,msg='' if os.getuid()else ''))self.file = fileself.read()", "docstring": "Set the config file and validate file permissions\n\n :param str file: path to file\n :param kwargs: default values in dict", "id": "f2811:c0:m0"} {"signature": "def discovery_print(pkt):", "body": "if pkt.src in mac_id_list:returnmac_id_list.append(pkt.src)text = pkt_text(pkt)click.secho(text, fg='') if '' in text else click.echo(text)", "docstring": "Scandevice callback. Register src mac to avoid src repetition.\n Print device on screen.\n\n :param scapy.packet.Packet pkt: Scapy Packet\n :return: None", "id": "f2813:m1"} {"signature": "def get_method(self):", "body": "return self.default_method", "docstring": "Get HTTP method. By default default_method\n\n :return: HTTP method\n :rtype: str", "id": "f2823:c3:m2"} {"signature": "def get_body(self):", "body": "if self.default_body:return self.default_bodydata = self.data.get('')if isinstance(data, dict):return json.dumps(data)return data", "docstring": "Return \"data\" value on self.data\n\n :return: data to send\n :rtype: str", "id": "f2823:c4:m1"} {"signature": "def validate(self):", "body": "if (self.data.get('') or self.data.get('')) andself.data.get('', '').lower() not in CONTENT_TYPE_METHODS:raise InvalidConfig(extra_body=''''.format(''.join(CONTENT_TYPE_METHODS), self.name))self.data[''] = CONTENT_TYPE_ALIASES.get(self.data.get(''),self.data.get(''))form_type = CONTENT_TYPE_ALIASES['']if self.data.get('') and (self.data.get('') or form_type) == form_type:try:self.data[''] = json.loads(self.data[''])except JSONDecodeError:raise InvalidConfig(extra_body=''.format(self.name))", "docstring": "Check self.data. Raise InvalidConfig on error\n\n :return: None", "id": "f2823:c2:m0"} {"signature": "def get_url(self):", "body": "if not self.data[self.execute_name]:raise InvalidConfig(extra_body=''''.format(self.name))if not self.data.get(''):raise InvalidConfig(extra_body=''''''.format(self.name))url = self.url_pattern.format(event=self.data[''], key=self.data[self.execute_name])return url", "docstring": "IFTTT Webhook url\n\n :return: url\n :rtype: str", "id": "f2823:c7:m0"} {"signature": "def get_headers(self):", "body": "headers = copy.copy(self.default_headers or {})headers.update(self.data.get('') or {})return headers", "docstring": "Get HTTP Headers to send. 
By default default_headers\n\n :return: HTTP Headers\n :rtype: dict", "id": "f2823:c3:m4"} {"signature": "def execute(self, root_allowed=False):", "body": "raise NotImplementedError", "docstring": "Execute using self.data\n\n :param bool root_allowed: Only used for ExecuteCmd\n :return:", "id": "f2823:c0:m2"} {"signature": "def run_as_cmd(cmd, user, shell=''):", "body": "to_execute = get_shell(shell) + [EXECUTE_SHELL_PARAM, cmd]if user == '':return to_executereturn ['', '', '', '', user] + to_execute", "docstring": "Get the arguments to execute a command as a user\n\n :param str cmd: command to execute\n :param user: User for use\n :param shell: Bash, zsh, etc.\n :return: arguments\n :rtype: list", "id": "f2823:m1"} {"signature": "def get_url(self):", "body": "url = super(ExecuteHomeAssistant, self).get_url()if not self.data.get(''):raise InvalidConfig(extra_body=''.format(self.name))url += ''.format(self.data[''])return url", "docstring": "Home assistant url\n\n :return: url\n :rtype: str", "id": "f2823:c5:m0"} {"signature": "def execute_cmd(cmd, cwd=None, timeout=):", "body": "p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)try:p.wait(timeout=timeout)except subprocess.TimeoutExpired:return Noneelse:stdout, stderr = p.stdout.read(), p.stderr.read()if sys.version_info >= (,):stdout, stderr = stdout.decode('', errors=''), stderr.decode('', errors='')if p.returncode:raise ExecuteError(''.format(''.join(cmd), p.returncode, stderr))else:return stdout, stderr", "docstring": "Excecute command on thread\n\n :param cmd: Command to execute\n :param cwd: current working directory\n :return: None", "id": "f2823:m2"} {"signature": "def get_content_type(self):", "body": "return self.default_content_type", "docstring": "Get HTTP content type to send. By default default_content_type\n\n :return: HTTP content type\n :rtype: str", "id": "f2823:c3:m3"} {"signature": "def send_confirmation(self, message, success=True):", "body": "message = message.strip()if not self.confirmation:returntry:self.confirmation.send(message, success)except Exception as e:logger.warning(''.format(self.name, e))", "docstring": "Send success or error message to configured confirmation\n\n :param str message: Body message to send\n :param bool success: Device executed successfully to personalize message\n :return: None", "id": "f2825:c0:m3"} {"signature": "def on_push(self, device):", "body": "src = device.src.lower()if last_execution[src] + self.settings.get('', DEFAULT_DELAY) > time.time():returnlast_execution[src] = time.time()self.execute(device)", "docstring": "Press button. 
Check DEFAULT_DELAY.\n\n :param scapy.packet.Packet device: Scapy packet\n :return: None", "id": "f2825:c1:m1"} {"signature": "def __init__(self, config_path, ignore_perms=False):", "body": "self.config = Config(config_path, ignore_perms)self.settings = self.config.get('', {})self.devices = {key.lower(): Device(key, value, self.config)for key, value in self.config[''].items()}assert len(self.devices) == len(self.config['']), \"\"", "docstring": ":param str config_path: Path to config file", "id": "f2825:c1:m0"} {"signature": "def create_logger(name, level=logging.INFO):", "body": "logger = logging.getLogger(name)logger.setLevel(level)ch = logging.StreamHandler()ch.setLevel(level)formatter = logging.Formatter('')ch.setFormatter(formatter)logger.addHandler(ch)", "docstring": "Create a Logger and set handler and formatter\n\n :param name: logger name\n :param level: logging level\n :return: None", "id": "f2830:m0"} {"signature": "def __init__(self, file=None, extra_body=''):", "body": "body = ''if file:file = os.path.abspath(file)body += ''.format(file)body += ''.format(file)if extra_body:body += ''.format(extra_body)super(InvalidConfig, self).__init__(body)", "docstring": ":param str file: Path to config file\n:param extra_body: complementary message", "id": "f2831:c3:m0"} {"signature": "def find(ellipsname, crstype, strict=False):", "body": "if not strict:ellipsname = ellipsname.lower().replace(\"\",\"\")for itemname,item in globals().items():if itemname.startswith(\"\") or itemname == '':continuetry:if hasattr(item.name, crstype):itemname = getattr(item.name, crstype)if not strict:itemname = itemname.lower().replace(\"\",\"\")if ellipsname == itemname:return itemexcept:passelse:return None", "docstring": "Search for a ellipsoid name located in this module.\n\nArguments:\n\n- **ellipsname**: The ellipsoid name to search for.\n- **crstype**: Which CRS naming convention to search (different\n CRS formats have different names for the same ellipsoid).\n- **strict** (optional): If False, ignores minor name mismatches\n such as underscore or character casing, otherwise must be exact\n match (defaults to False).", "id": "f2841:m0"} {"signature": "def to_ogc_wkt(self):", "body": "return '' % (self.name, self.datum.to_ogc_wkt(), self.prime_mer.to_ogc_wkt(), self.angunit.to_ogc_wkt(), self.twin_ax[].ogc_wkt, self.twin_ax[].ogc_wkt )", "docstring": "Returns the CS as a OGC WKT formatted string.", "id": "f2844:c1:m2"} {"signature": "def to_proj4(self, as_dict=False):", "body": "string = \"\" % self.proj.to_proj4()string += \"\" % self.geogcs.to_proj4(toplevel=False)string += \"\" + \"\".join(param.to_proj4() for param in self.params)string += \"\" % self.unit.to_proj4()string += \"\" + self.twin_ax[].proj4 + self.twin_ax[].proj4 + \"\" string += \"\"if as_dict:return dict([entry.lstrip('').split('')for entry in string.split()if entry != \"\"])else:return string", "docstring": "Returns the CS as a proj4 formatted string or dict.\n\nArguments:\n\n- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).", "id": "f2844:c2:m1"} {"signature": "def to_ogc_wkt(self):", "body": "string = '' % (self.name, self.geogcs.to_ogc_wkt(), self.proj.to_ogc_wkt() )string += \"\".join(param.to_ogc_wkt() for param in self.params)string += '' % self.unit.to_ogc_wkt()string += '' % (self.twin_ax[].ogc_wkt, self.twin_ax[].ogc_wkt )return string", "docstring": "Returns the CS as a OGC WKT formatted string.", "id": "f2844:c2:m2"} {"signature": "def to_proj4(self, as_dict=False, toplevel=True):", 
"body": "if toplevel:string = \"\" % (self.datum.to_proj4(), self.prime_mer.to_proj4())else:string = \"\" % (self.datum.to_proj4(), self.prime_mer.to_proj4())if as_dict:return dict([entry.lstrip('').split('')for entry in string.split()if entry != \"\"])else:return string", "docstring": "Returns the CS as a proj4 formatted string or dict.\n\nArguments:\n\n- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).\n- **toplevel** (optional): If True, treats this CS as the final toplevel CS and adds the necessary proj4 elements (defaults to True).", "id": "f2844:c1:m1"} {"signature": "def to_esri_wkt(self):", "body": "string = '' % (self.name, self.geogcs.to_esri_wkt(), self.proj.to_esri_wkt() )string += \"\".join(param.to_esri_wkt() for param in self.params)string += '' % self.unit.to_esri_wkt()string += '' % (self.twin_ax[].esri_wkt, self.twin_ax[].esri_wkt )return string", "docstring": "Returns the CS as a ESRI WKT formatted string.", "id": "f2844:c2:m3"} {"signature": "def from_unknown_wkt(string, strict=False):", "body": "return _from_wkt(string, None, strict)", "docstring": "Given an unknown wkt string, detect if uses ogc or esri flavor, and parse the crs accordingly.\n\nArguments:\n\n- *string*: The unknown WKT representation as a string.\n- *strict* (optional): When True, the parser is strict about names having to match\n exactly with upper and lowercases. Default is not strict (False).\n\nReturns:\n- A CS instance of the indicated type.", "id": "f2849:m5"} {"signature": "def from_esri_wkt(string, strict=False):", "body": "return _from_wkt(string, \"\", strict)", "docstring": "Parse crs as esri wkt formatted string and return the resulting crs object.\n\nArguments:\n\n- *string*: The ESRI WKT representation as a string.\n- *strict* (optional): When True, the parser is strict about names having to match\n exactly with upper and lowercases. 
Default is not strict (False).\n\nReturns:\n\n- A CS instance of the indicated type.", "id": "f2849:m4"} {"signature": "def from_esri_code(code):", "body": "code = str(code)proj4 = utils.crscode_to_string(\"\", code, \"\")crs = from_proj4(proj4)return crs", "docstring": "Load crs object from esri code, via spatialreference.org.\nParses based on the proj4 representation.\n\nArguments:\n\n- *code*: The ESRI code as an integer.\n\nReturns:\n\n- A CS instance of the indicated type.", "id": "f2849:m1"} {"signature": "def dumps(obj):", "body": "if not isinstance(obj, dict):raise TypeError('' + type(obj).__name__)return ''.join(_dumps(obj, level=)) + ''", "docstring": "Serializes a dictionary into ACF data.\n:param obj: A dictionary to serialize.\n:return: ACF data.", "id": "f2856:m2"} {"signature": "def load(fp, wrapper=dict):", "body": "return loads(fp.read(), wrapper=wrapper)", "docstring": "Loads the contents of an Appinfo file into a Python object.\n:param fp: A file object.\n:param wrapper: A wrapping object for key-value pairs.\n:return: An Ordered Dictionary with Appinfo data.", "id": "f2857:m0"} {"signature": "def dump(obj, fp):", "body": "fp.write(dumps(obj))", "docstring": "Serializes a dictionary into Appinfo data and writes it to a file.\n:param obj: A dictionary to serialize.\n:param fp: A file object.", "id": "f2857:m2"} {"signature": "def load(fp, wrapper=dict):", "body": "return loads(fp.read(), wrapper=wrapper)", "docstring": "Loads the contents of a Manifest file into a Python object.\n:param fp: A file object.\n:param wrapper: A wrapping object for key-value pairs.\n:return: A dictionary with Manifest data.", "id": "f2858:m0"} {"signature": "def dump(obj, fp):", "body": "fp.write(dumps(obj))", "docstring": "Serializes a dictionary into Manifest data and writes it to a file.\n:param obj: A dictionary to serialize.\n:param fp: A file object.", "id": "f2858:m2"} {"signature": "def get_item_abspath(self, identifier):", "body": "admin_metadata = self.get_admin_metadata()uuid = admin_metadata[\"\"]dataset_cache_abspath = os.path.join(self._s3_cache_abspath, uuid)mkdir_parents(dataset_cache_abspath)bucket_fpath = self.data_key_prefix + identifierobj = self.s3resource.Object(self.bucket, bucket_fpath)relpath = obj.get()['']['']_, ext = os.path.splitext(relpath)local_item_abspath = os.path.join(dataset_cache_abspath,identifier + ext)if not os.path.isfile(local_item_abspath):tmp_local_item_abspath = local_item_abspath + \"\"self.s3resource.Bucket(self.bucket).download_file(bucket_fpath,tmp_local_item_abspath)os.rename(tmp_local_item_abspath, local_item_abspath)return local_item_abspath", "docstring": "Return absolute path at which item content can be accessed.\n\n :param identifier: item identifier\n :returns: absolute path from which the item content can be accessed", "id": "f2867:c0:m24"} {"signature": "def loop_options(options):", "body": "def ch(option, parsed_option):return lambda self: self.check_valid_option(option, parsed_option)for i, items in enumerate(options):prefix_tmp = \"\" % (items[], i)setattr(TestOptions, \"\" % prefix_tmp, ch(items[], items[]))", "docstring": "Loop over list of options and dynamically create tests", "id": "f2874:m0"} {"signature": "def _process_ssh_rsa(self, data):", "body": "current_position, raw_e = self._unpack_by_int(data, )current_position, raw_n = self._unpack_by_int(data, current_position)unpacked_e = self._parse_long(raw_e)unpacked_n = self._parse_long(raw_n)self.rsa = RSAPublicNumbers(unpacked_e, 
unpacked_n).public_key(default_backend())self.bits = self.rsa.key_sizeif self.strict_mode:min_length = self.RSA_MIN_LENGTH_STRICTmax_length = self.RSA_MAX_LENGTH_STRICTelse:min_length = self.RSA_MIN_LENGTH_LOOSEmax_length = self.RSA_MAX_LENGTH_LOOSEif self.bits < min_length:raise TooShortKeyError(\"\" % (self.key_type, min_length, self.bits))if self.bits > max_length:raise TooLongKeyError(\"\" % (self.key_type, max_length, self.bits))return current_position", "docstring": "Parses ssh-rsa public keys.", "id": "f2878:c1:m13"} {"signature": "def _unpack_by_int(self, data, current_position):", "body": "try:requested_data_length = struct.unpack('', data[current_position:current_position + self.INT_LEN])[]except struct.error:raise MalformedDataError(\"\" % self.INT_LEN)current_position += self.INT_LENremaining_data_length = len(data[current_position:])if remaining_data_length < requested_data_length:raise MalformedDataError(\"\" % (requested_data_length, remaining_data_length))next_data = data[current_position:current_position + requested_data_length]current_position += requested_data_lengthreturn current_position, next_data", "docstring": "Returns a tuple with (location of next data field, contents of requested data field).", "id": "f2878:c1:m7"} {"signature": "def parse(self, keydata=None):", "body": "if keydata is None:if self.keydata is None:raise ValueError(\"\")keydata = self.keydataelse:self.reset()self.keydata = keydataif keydata.startswith(\"\"):key_type = None pubkey_content = \"\".join([line for line in keydata.split(\"\") if \"\" not in line and \"\" not in line])else:key_parts = self._split_key(keydata)key_type = key_parts[]pubkey_content = key_parts[]self._decoded_key = self.decode_key(pubkey_content)current_position, unpacked_key_type = self._unpack_by_int(self._decoded_key, )if key_type is not None and key_type != unpacked_key_type.decode():raise InvalidTypeError(\"\" % (key_type, unpacked_key_type))self.key_type = unpacked_key_typekey_data_length = self._process_key(self._decoded_key[current_position:])current_position = current_position + key_data_lengthif current_position != len(self._decoded_key):raise MalformedDataError(\"\" % (len(self._decoded_key) - current_position))if self.disallow_options and self.options:raise InvalidOptionsError(\"\")", "docstring": "Validates SSH public key.\n\n Throws exception for invalid keys. 
Otherwise returns None.\n\n Populates the key_type and bits fields.\n\n For rsa keys, see field \"rsa\" for raw public key data.\n For dsa keys, see field \"dsa\".\n For ecdsa keys, see field \"ecdsa\".", "id": "f2878:c1:m18"} {"signature": "@classmethoddef decode_key(cls, pubkey_content):", "body": "try:decoded_key = base64.b64decode(pubkey_content.encode(\"\"))except (TypeError, binascii.Error):raise MalformedDataError(\"\")return decoded_key", "docstring": "Decode the base64-coded part of the key.", "id": "f2878:c1:m10"} {"signature": "def write_output(filepath, pack, filename, content):", "body": "if (filepath and filepath != ''and not os.path.exists(os.path.join(filepath, pack))):os.makedirs(os.path.join(filepath, pack))destination = os.path.join(filepath, pack, filename)with io.open(destination, '', encoding='') as f:f.write(content)return destination", "docstring": "Write content to filepath+pack+filename, creating filepath if it does not\nalready exist.\n\nThis is a helper to automatically rewrite HTML outputs when needed during\ndevelopment.\n\nBeware, using this will lose any templating instructions previously\nwritten in earlier output attempts.", "id": "f2895:m0"} {"signature": "def _extract_version(package_name):", "body": "try:return pkg_resources.get_distribution(package_name).versionexcept pkg_resources.DistributionNotFound:_conf = read_configuration(os.path.join(PROJECT_DIR, \"\"))return _conf[\"\"][\"\"]", "docstring": "Get the package version from the installed distribution, or from the configuration file if not\ninstalled", "id": "f2911:m0"} {"signature": "def available_sources(self):", "body": "return list(self.SOURCES.keys())", "docstring": "Return a list of available sources.", "id": "f2924:c1:m9"} {"signature": "def tuner_am_preset(self, operator, value=None):", "body": "return self.exec_command('', '', operator, value)", "docstring": "Execute Tuner.AM.Preset.", "id": "f2924:c0:m12"} {"signature": "def status(self):", "body": "nad_reply = self._send(self.POLL_VOLUME +self.POLL_POWER +self.POLL_MUTED +self.POLL_SOURCE, read_reply=True)if nad_reply is None:returnnum_chars = nad_status = [nad_reply[i:i + num_chars]for i in range(, len(nad_reply), num_chars)]return {'': int(nad_status[][-:], ),'': nad_status[][-:] == '','': nad_status[][-:] == '','': self.SOURCES_REVERSED[nad_status[][-:]]}", "docstring": "Return the status of the device.\n\nReturns a dictionary with keys 'volume' (int 0-200), 'power' (bool),\n 'muted' (bool) and 'source' (str).", "id": "f2924:c1:m2"} {"signature": "def select_source(self, source):", "body": "status = self.status()if status['']: if status[''] != source: if source in self.SOURCES:self._send(self.CMD_SOURCE + self.SOURCES[source], read_reply=True)", "docstring": "Select a source from the list of sources.", "id": "f2924:c1:m8"} {"signature": "def set_volume(self, volume):", "body": "if <= volume <= :volume = format(volume, \"\") self._send(self.CMD_VOLUME + volume)", "docstring": "Set volume level of the device. 
Accepts integer values 0-200.", "id": "f2924:c1:m5"} {"signature": "def power_on(self):", "body": "status = self.status()if not status['']:self._send(self.CMD_ON, read_reply=True)sleep()", "docstring": "Power the device on.", "id": "f2924:c1:m4"} {"signature": "def get_setup_version():", "body": "ver = ''.join(map(str, VERSION[:]))if not VERSION[]:return verhyphen = ''suffix = hyphen.join(map(str, VERSION[-:]))if VERSION[] in [VERSION_SUFFIX_DEV, VERSION_SUFFIX_POST]:hyphen = ''ver = hyphen.join([ver, suffix])return ver", "docstring": "Get the version number used for packaging, following the versioning scheme officially recommended by PYPI\n\n:return: PYPI packaging version number\n:rtype: str", "id": "f2926:m0"} {"signature": "def _register(func):", "body": "hand[func.__name__] = funcreturn func", "docstring": "Register the decorated function in the hand singleton dict\n\n:param object func: the decorated function\n:return: the decorated function itself (without wrapping)\n:rtype: function", "id": "f2927:m0"} {"signature": "@abc.abstractmethoddef version(self):", "body": "", "docstring": "Version information\n\n:return: a tuple of (package name, version number)\n:rtype: (str, str)", "id": "f2928:c0:m1"} {"signature": "def __new__(mcs, *args, **kwargs):", "body": "class_name, super_cls, dict_ = argsdict_[''] = Nonecls_ = super(Singleton, mcs).__new__(mcs, class_name, super_cls, dict_, **kwargs)return cls_", "docstring": "The metaclass mcs builds the class object through __new__, where mcs refers to Singleton\n\n:param list args: may contain the three elements needed to build a class: the ``class name``, the ``base class`` and\n    the ``namespace``; within the namespace, __qualname__ and the functions' __qualname__\n    both carry the class name as a prefix, so to replace the class name here all of them must be replaced as well.\n:param dict kwargs: additional parameters may be passed as needed\n:return: the class object, already assembled at this point via super(Singleton, mcs).__new__\n:rtype: class", "id": "f2932:c1:m0"} {"signature": "def get_commands_from_module(imported):", "body": "imported_vars = vars(imported)if \"\" in imported_vars:imported_vars = [(name, imported_vars[name]) for name inimported_vars if name in imported_vars[\"\"]]else:imported_vars = imported_vars.items()cmd_dict = extract_commands(imported_vars)return imported.__doc__, cmd_dict", "docstring": "Get all ``click.core.Command`` objects from the given ``imported``\n\n:param module imported: the imported Python package\n:return: the package docstring and a dict containing only the terminal command functions\n:rtype: (str, dict(str, object))", "id": "f2934:m2"} {"signature": "@propertydef xml(self) -> str:", "body": "return self._xml", "docstring": ":return: The XML representation of the beacon, as a string", "id": "f2951:c0:m14"} {"signature": "@propertydef timestamp(self) -> int:", "body": "return self._timestamp", "docstring": ":return:\n The time the seed value was generated as the number of\n seconds since January 1, 1970", "id": "f2951:c0:m11"} {"signature": "def 
__init__(self,key_name: str,file_path: str,magic_line: str,strip_end_chars: int = ,):", "body": "self.key_name = key_nameself.file_path = file_pathself.magic_line = magic_lineself.strip_end_chars = strip_end_chars", "docstring": "A simple python object that is used to extract and\n(eventually) update version values across the project\n\nThe 'magic_line' is the first part of a file line that\nis before the version number. So for example, if the version\nis stored on a line such as '__version__=1' your magic line\nwill be '__version__='. If you need to remove any extra characters\nat the end of the version line, increase the 'strip_end_chars'\nproperty on this object.\n\n:param key_name: The key name, or reference name, for this file\n:param file_path: The path to the file\n:param magic_line: The \"magic line\" to search for\n:param strip_end_chars: The number of characters to strip off the end", "id": "f2956:c0:m0"} {"signature": "def copy_dir(bucket_name, src_path, dest_path,aws_access_key_id=None, aws_secret_access_key=None,aws_profile=None,surrogate_key=None, cache_control=None,surrogate_control=None,create_directory_redirect_object=True):", "body": "if not src_path.endswith(''):src_path += ''if not dest_path.endswith(''):dest_path += ''common_prefix = os.path.commonprefix([src_path, dest_path])if common_prefix == src_path:msg = ''.format(common_prefix, src_path)raise RuntimeError(msg)if common_prefix == dest_path:msg = ''.format(common_prefix, dest_path)raise RuntimeError(msg)delete_dir(bucket_name, dest_path,aws_access_key_id, aws_secret_access_key)session = boto3.session.Session(aws_access_key_id=aws_access_key_id,aws_secret_access_key=aws_secret_access_key,profile_name=aws_profile)s3 = session.resource('')bucket = s3.Bucket(bucket_name)for src_obj in bucket.objects.filter(Prefix=src_path):src_rel_path = os.path.relpath(src_obj.key, start=src_path)dest_key_path = os.path.join(dest_path, src_rel_path)head = s3.meta.client.head_object(Bucket=bucket_name,Key=src_obj.key)metadata = head['']content_type = head['']if cache_control is None and '' in head:cache_control = head['']if surrogate_control is not None:metadata[''] = surrogate_controlif surrogate_key is not None:metadata[''] = surrogate_keys3.meta.client.copy_object(Bucket=bucket_name,Key=dest_key_path,CopySource={'': bucket_name, '': src_obj.key},MetadataDirective='',Metadata=metadata,ACL='',CacheControl=cache_control,ContentType=content_type)if create_directory_redirect_object:dest_dirname = dest_path.rstrip('')obj = bucket.Object(dest_dirname)metadata = {'': ''}obj.put(Body='',ACL='',Metadata=metadata,CacheControl=cache_control)", "docstring": "Copy objects from one directory in a bucket to another directory in\n the same bucket.\n\n Object metadata is preserved while copying, with the following exceptions:\n\n - If a new surrogate key is provided it will replace the original one.\n - If ``cache_control`` and ``surrogate_control`` values are provided they\n will replace the old one.\n\n Parameters\n ----------\n bucket_name : `str`\n Name of an S3 bucket.\n src_path : `str`\n Source directory in the S3 bucket. The ``src_path`` should ideally end\n in a trailing `'/'`. E.g. `'dir/dir2/'`.\n dest_path : `str`\n Destination directory in the S3 bucket. The ``dest_path`` should\n ideally end in a trailing `'/'`. E.g. `'dir/dir2/'`. The destination\n path cannot contain the source path.\n aws_access_key_id : `str`\n The access key for your AWS account. 
Also set\n ``aws_secret_access_key``.\n aws_secret_access_key : `str`\n The secret key for your AWS account.\n aws_profile : `str`, optional\n Name of AWS profile in :file:`~/.aws/credentials`. Use this instead\n of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based\n credentials.\n surrogate_key : `str`, optional\n The surrogate key to insert in the header of all objects in the\n ``x-amz-meta-surrogate-key`` field. This key is used to purge\n builds from the Fastly CDN when Editions change.\n If `None` then no header will be set.\n If the object already has an ``x-amz-meta-surrogate-key`` header then\n it will be replaced.\n cache_control : `str`, optional\n This sets (and overrides) the ``Cache-Control`` header on the copied\n files. The ``Cache-Control`` header specifically dictates how content\n is cached by the browser (if ``surrogate_control`` is also set).\n surrogate_control : `str`, optional\n This sets (and overrides) the ``x-amz-meta-surrogate-control`` header\n on the copied files. The ``Surrogate-Control``\n or ``x-amz-meta-surrogate-control`` header is used in priority by\n Fastly to govern its caching. This caching policy is *not* passed\n to the browser.\n create_directory_redirect_object : `bool`, optional\n Create a directory redirect object for the root directory. The\n directory redirect object is an empty S3 object named after the\n directory (without a trailing slash) that contains an\n ``x-amz-meta-dir-redirect=true`` HTTP header. LSST the Docs' Fastly\n VCL is configured to redirect requests for a directory path to the\n directory's ``index.html`` (known as *courtesy redirects*).\n\n Raises\n ------\n ltdconveyor.s3.S3Error\n Thrown by any unexpected faults from the S3 API.\n RuntimeError\n Thrown when the source and destination directories are the same.", "id": "f2965:m0"} {"signature": "def ensure_login(ctx):", "body": "logger = logging.getLogger(__name__)logger.info('', __name__)if ctx.obj[''] is None:if ctx.obj[''] is None or ctx.obj[''] is None:raise click.UsageError('''')sys.exit()logger.debug('',ctx.obj[''],ctx.obj[''])token = get_keeper_token(ctx.obj[''],ctx.obj[''],ctx.obj[''])ctx.obj[''] = tokenlogger.debug('',ctx.obj[''],ctx.obj[''])else:logger.debug('')", "docstring": "Ensure a token is in the Click context object or authenticate and obtain\n the token from LTD Keeper.\n\n Parameters\n ----------\n ctx : `click.Context`\n The Click context. ``ctx.obj`` must be a `dict` that contains keys:\n ``keeper_hostname``, ``username``, ``password``, ``token``. This\n context object is prepared by the main Click group,\n `ltdconveyor.cli.main.main`.", "id": "f2972:m0"} {"signature": "def confirm_build(build_url, keeper_token):", "body": "data = {'': True}r = requests.patch(build_url,auth=(keeper_token, ''),json=data)if r.status_code != :raise KeeperError(r)", "docstring": "Confirm a build upload is complete.\n\n Wraps ``PATCH /builds/{build}``.\n\n Parameters\n ----------\n build_url : `str`\n URL of the build resource. 
Given a build resource, this URL is\n available from the ``self_url`` field.\n keeper_token : `str`\n Auth token (`ltdconveyor.keeper.get_keeper_token`).\n\n Raises\n ------\n ltdconveyor.keeper.KeeperError\n Raised if there is an error communicating with the LTD Keeper API.", "id": "f2973:m1"} {"signature": "def get_value(self, item, source_name):", "body": "val = item.get(source_name.encode(''), None)if val is not None:val = convert_string(val)return val", "docstring": "This method receives an item from the source and a source name,\nand returns the text content for the `source_name` node.", "id": "f2980:c0:m3"} {"signature": "def get_value(self, item, source_name):", "body": "return force_text(smart_str(item.findtext(source_name))).strip()", "docstring": "This method receives an item from the source and a source name,\nand returns the text content for the `source_name` node.", "id": "f2982:c0:m2"} {"signature": "def download_file(url, dest):", "body": "request = urllib2.Request(url)request.add_header('', '')opener = urllib2.build_opener()response = opener.open(request)data = response.read()if response.headers.get('', '') == '':stream = StringIO.StringIO(data)gzipper = gzip.GzipFile(fileobj=stream)data = gzipper.read()f = open(dest, '')f.write(data)f.close()", "docstring": "Downloads a HTTP resource from `url` and save to `dest`.\n\nCapable of dealing with Gzip compressed content.", "id": "f2985:m0"} {"signature": "@staticmethoddef build(data_path: str) -> Path:", "body": "return Path(data_path)", "docstring": "Base method that interprets ``data_path`` argument.\n\n Args:\n data_path: path to the tsv-file containing erroneous and corrected words\n\n Returns:\n the same path as a :class:`~pathlib.Path` object", "id": "f3002:c0:m1"} {"signature": "@staticmethoddef read(data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[str, str]]]:", "body": "fname = TyposKartaslov.build(data_path)with open(str(fname), newline='', encoding='') as csvfile:reader = csv.reader(csvfile, delimiter='')next(reader)res = [(mistake, correct) for correct, mistake, weight in reader]return {'': res}", "docstring": "Read train data for spelling corrections algorithms\n\n Args:\n data_path: path that needs to be interpreted with :meth:`~deeppavlov.dataset_readers.typos_reader.TyposKartaslov.build`\n\n Returns:\n train data to pass to a :class:`~deeppavlov.dataset_iterators.typos_iterator.TyposDatasetIterator`", "id": "f3002:c2:m2"} {"signature": "@classmethod@overridesdef read(self, data_path: str, dialogs: bool = False) -> Dict[str, List]:", "body": "required_files = (self._data_fname(dt) for dt in ('', '', ''))if not all(Path(data_path, f).exists() for f in required_files):log.info(''.format(self.url, data_path))download_decompress(self.url, data_path)mark_done(data_path)data = {'': self._read_from_file(Path(data_path, self._data_fname('')), dialogs),'': self._read_from_file(Path(data_path, self._data_fname('')), dialogs),'': self._read_from_file(Path(data_path, self._data_fname('')), dialogs)}return data", "docstring": "Downloads ``'kvrest_public.tar.gz'``, decompresses, saves files to ``data_path``.\n\nParameters:\n data_path: path to save data\n dialogs: flag indices whether to output list of turns or list of dialogs\n\nReturns:\n dictionary with ``'train'`` containing dialogs from ``'kvret_train_public.json'``, ``'valid'`` containing dialogs from ``'kvret_valid_public.json'``, ``'test'`` containing dialogs from ``'kvret_test_public.json'``. 
Each fields is a list of tuples ``(x_i, y_i)``.", "id": "f3006:c0:m1"} {"signature": "@classmethod@overridesdef read(self, data_path: str, dialogs: bool = False) -> Dict[str, List]:", "body": "required_files = (self._data_fname(dt) for dt in ('', '', ''))if not all(Path(data_path, f).exists() for f in required_files):log.info(''.format(self.url, data_path))download_decompress(self.url, data_path)mark_done(data_path)data = {'': self._read_from_file(Path(data_path, self._data_fname('')), dialogs),'': self._read_from_file(Path(data_path, self._data_fname('')), dialogs),'': self._read_from_file(Path(data_path, self._data_fname('')), dialogs)}return data", "docstring": "Downloads ``'dstc2_v2.tar.gz'`` archive from ipavlov internal server,\ndecompresses and saves files to ``data_path``.\n\nParameters:\n data_path: path to save DSTC2 dataset\n dialogs: flag which indicates whether to output list of turns or\n list of dialogs\n\nReturns:\n dictionary that contains ``'train'`` field with dialogs from\n ``'dstc2-trn.jsonlist'``, ``'valid'`` field with dialogs from\n ``'dstc2-val.jsonlist'`` and ``'test'`` field with dialogs from\n ``'dstc2-tst.jsonlist'``. Each field is a list of tuples ``(x_i, y_i)``.", "id": "f3008:c0:m1"} {"signature": "def read(self, dir_path: str, dataset: Optional[str] = '', url: Optional[str] = None, *args, **kwargs)-> Dict[str, Dict[str, Any]]:", "body": "if url is not None:self.url = urlelif dataset == '':self.url = self.url_squadelif dataset == '':self.url = self.url_sber_squadelif dataset == '':self.url = self.url_multi_squadelse:raise RuntimeError(''.format(dataset))dir_path = Path(dir_path)required_files = [''.format(dt) for dt in ['', '']]if not dir_path.exists():dir_path.mkdir()if not all((dir_path / f).exists() for f in required_files):download_decompress(self.url, dir_path)dataset = {}for f in required_files:with dir_path.joinpath(f).open('', encoding='') as fp:data = json.load(fp)if f == '':dataset[''] = dataelse:dataset[''] = datareturn dataset", "docstring": "Args:\n dir_path: path to save data\n dataset: default dataset names: ``'SQuAD'``, ``'SberSQuAD'`` or ``'MultiSQuAD'``\n url: link to archive with dataset, use url argument if non-default dataset is used\n\nReturns:\n dataset split on train/valid\n\nRaises:\n RuntimeError: if `dataset` is not one of these: ``'SQuAD'``, ``'SberSQuAD'``, ``'MultiSQuAD'``.", "id": "f3016:c0:m0"} {"signature": "def read(self, dir_path: str, dataset: Optional[str] = '', url: Optional[str] = None, *args,**kwargs) -> Dict[str, Dict[str, Any]]:", "body": "if url is not None:self.url = urlelif dataset == '':self.url = self.url_multi_squad_retrelif dataset == '':self.url = self.url_multi_squad_ru_retrelse:raise RuntimeError(''.format(dataset))dir_path = Path(dir_path)required_files = [''.format(dt) for dt in ['', '']]if not dir_path.exists():dir_path.mkdir(parents=True)if not all((dir_path / f).exists() for f in required_files):download_decompress(self.url, dir_path)dataset = {}for f in required_files:if '' in f:dataset[''] = dir_path.joinpath(f)else:dataset[''] = dir_path.joinpath(f)return dataset", "docstring": "Args:\n dir_path: path to save data\n dataset: default dataset names: ``'MultiSQuADRetr'``, ``'MultiSQuADRuRetr'``\n url: link to archive with dataset, use url argument if non-default dataset is used\n\nReturns:\n dataset split on train/valid\n\nRaises:\n RuntimeError: if `dataset` is not one of these: ``'MultiSQuADRetr'``, ``'MultiSQuADRuRetr'``.", "id": "f3016:c1:m0"} {"signature": "def read(self, data_path: Union[Path, 
str], db_url: Optional[str] = None, *args,**kwargs) -> None:", "body": "logger.info('')try:save_path = expand_path(kwargs[''])except KeyError:raise ConfigError(f'')if save_path.exists() and save_path.with_suffix(f'').exists():returntry:dataset_format = kwargs['']except KeyError:raise ConfigError(f'')save_path.parent.mkdir(parents=True, exist_ok=True)if db_url:download_dir = save_path.parentlogger.info(f'')download(download_dir, db_url, force_download=False)returnself._build_db(save_path, dataset_format, expand_path(data_path))", "docstring": "Build a SQLite database from provided files, download SQLite database from a provided URL,\n or do nothing.\n\n Args:\n data_path: a directory/file with texts to create a database from\n db_url: path to a database url\n kwargs:\n save_path: a path where a database should be saved to, or path to a ready database\n dataset_format: initial data format; should be selected from ['txt', 'wiki', 'json']\n\n Returns:\n None", "id": "f3017:c0:m0"} {"signature": "@register_metric('')def round_f1_weighted(y_true, y_predicted):", "body": "try:predictions = [np.round(x) for x in y_predicted]except TypeError:predictions = y_predictedreturn f1_score(np.array(y_true), np.array(predictions), average=\"\")", "docstring": "Calculates F1 weighted measure.\n\nArgs:\n y_true: list of true values\n y_predicted: list of predicted values\n\nReturns:\n F1 score", "id": "f3023:m3"} {"signature": "def compute_bleu(reference_corpus, translation_corpus, max_order=,smooth=False):", "body": "matches_by_order = [] * max_orderpossible_matches_by_order = [] * max_orderreference_length = translation_length = for (references, translation) in zip(reference_corpus,translation_corpus):reference_length += min(len(r) for r in references)translation_length += len(translation)merged_ref_ngram_counts = collections.Counter()for reference in references:merged_ref_ngram_counts |= _get_ngrams(reference, max_order)translation_ngram_counts = _get_ngrams(translation, max_order)overlap = translation_ngram_counts & merged_ref_ngram_countsfor ngram in overlap:matches_by_order[len(ngram)-] += overlap[ngram]for order in range(, max_order+):possible_matches = len(translation) - order + if possible_matches > :possible_matches_by_order[order-] += possible_matchesprecisions = [] * max_orderfor i in range(, max_order):if smooth:precisions[i] = ((matches_by_order[i] + ) /(possible_matches_by_order[i] + ))else:if possible_matches_by_order[i] > :precisions[i] = (float(matches_by_order[i]) /possible_matches_by_order[i])else:precisions[i] = if min(precisions) > :p_log_sum = sum(( / max_order) * math.log(p) for p in precisions)geo_mean = math.exp(p_log_sum)else:geo_mean = ratio = float(translation_length) / reference_lengthif ratio > :bp = else:bp = math.exp( - / ratio)bleu = geo_mean * bpreturn (bleu, precisions, bp, ratio, translation_length, reference_length)", "docstring": "Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of lists of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 
2004 smoothing.\n\n Returns:\n 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram\n precisions and brevity penalty.", "id": "f3030:m1"} {"signature": "def _get_ngrams(segment, max_order):", "body": "ngram_counts = collections.Counter()for order in range(, max_order + ):for i in range(, len(segment) - order + ):ngram = tuple(segment[i:i+order])ngram_counts[ngram] += return ngram_counts", "docstring": "Extracts all n-grams upto a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.", "id": "f3030:m0"} {"signature": "@register_metric('')def squad_v2_f1(y_true: List[List[str]], y_predicted: List[str]) -> float:", "body": "f1_total = for ground_truth, prediction in zip(y_true, y_predicted):prediction_tokens = normalize_answer(prediction).split()f1s = []for gt in ground_truth:gt_tokens = normalize_answer(gt).split()if len(gt_tokens) == or len(prediction_tokens) == :f1s.append(float(gt_tokens == prediction_tokens))continuecommon = Counter(prediction_tokens) & Counter(gt_tokens)num_same = sum(common.values())if num_same == :f1s.append()continueprecision = * num_same / len(prediction_tokens)recall = * num_same / len(gt_tokens)f1 = ( * precision * recall) / (precision + recall)f1s.append(f1)f1_total += max(f1s)return * f1_total / len(y_true) if len(y_true) > else ", "docstring": "Calculates F-1 score between y_true and y_predicted\n F-1 score uses the best matching y_true answer\n\n The same as in SQuAD-v2.0\n\n Args:\n y_true: list of correct answers (correct answers are represented by list of strings)\n y_predicted: list of predicted answers\n\n Returns:\n F-1 score : float", "id": "f3031:m2"} {"signature": "def fit(self, data: List[Dict[Any, Any]]) -> None:", "body": "log.info(f\"\")self.ec_data = [dict(item, **{'': self.preprocess.spacy2dict(self.preprocess.analyze(item[''])),'': self.preprocess.spacy2dict(self.preprocess.analyze(item['']+''+item['']))}) for item in data]log.info('')", "docstring": "Preprocess items `title` and `description` from the `data`\n\n Parameters:\n data: list of catalog items\n\n Returns:\n None", "id": "f3034:c0:m1"} {"signature": "def load(self, **kwargs) -> None:", "body": "log.info(f\"\")for path in self.load_path:if Path.is_file(path):self.ec_data += load_pickle(path)else:raise FileNotFoundErrorlog.info(f\"\")", "docstring": "Load classifier parameters", "id": "f3034:c0:m3"} {"signature": "def load(self) -> None:", "body": "log.info(\"\".format(self.load_path))self.ec_data, self.x_train_features = load_pickle(expand_path(self.load_path))", "docstring": "Load classifier parameters", "id": "f3035:c0:m3"} {"signature": "def save(self) -> None:", "body": "log.info(\"\".format(self.save_path))path = expand_path(self.save_path)save_pickle((self.ec_data, self.x_train_features), path)", "docstring": "Save classifier parameters", "id": "f3035:c0:m2"} {"signature": "def __call__(self, utterances_batch: list, history_batch: list,states_batch: Optional[list]=None) -> Tuple[list, list, list]:", "body": "batch_len = len(utterances_batch)confidence_batch = [] * batch_lenresponse_batch: List[Optional[str]] = [None] * batch_leninfer_indexes = []if not states_batch:states_batch: List[Optional[dict]] = [None] * batch_lenfor utt_i, utterance in enumerate(utterances_batch):if not 
states_batch[utt_i]:states_batch[utt_i] = {'': list(self.model.in_x), '': []}if utterance:states_batch[utt_i][''].pop()states_batch[utt_i][''].append(utterance)if states_batch[utt_i]['']:response = self.proposal.format(states_batch[utt_i][''][])response_batch[utt_i] = responseelse:infer_indexes.append(utt_i)if infer_indexes:infer_utterances = zip(*[tuple(states_batch[i]['']) for i in infer_indexes])infer_results = self.model(*infer_utterances)if len(self.model.out_params) > :infer_results = [''.join([str(out_y) for out_y in result]) for result in zip(*infer_results)]for infer_i, infer_result in zip(infer_indexes, infer_results):response_batch[infer_i] = infer_resultstates_batch[infer_i] = Nonereturn response_batch, confidence_batch, states_batch", "docstring": "Returns skill inference result.\n\n Returns batches of skill inference results, estimated confidence\n levels and up to date states corresponding to incoming utterance\n batch. Also handles interaction with multiargument models using\n skill states.\n\n Args:\n utterances_batch: A batch of utterances of any type.\n history_batch: Not used. A batch of list typed histories for each\n utterance.\n states_batch: A batch of states for each utterance.\n\n Returns:\n response: A batch of arbitrary typed skill inference results.\n confidence: A batch of float typed confidence levels for each of\n skill inference result.\n states: Optional. A batch of states for each response.", "id": "f3036:c0:m1"} {"signature": "def to_one_hot(x, k):", "body": "unit = np.eye(k, dtype=int)return unit[x]", "docstring": "Takes an array of integers and transforms it\nto an array of one-hot encoded vectors", "id": "f3044:m0"} {"signature": "@propertydef tags_number_(self) -> int:", "body": "return len(self.tags)", "docstring": "Tag vocabulary size", "id": "f3048:c0:m3"} {"signature": "def next_generation(self, generation: List[dict], scores: List[float], iteration: int) -> List[dict]:", "body": "next_population = self.selection_of_best_with_weights(generation, scores)log.info(\"\".format(self.n_saved_best_pretrained))offsprings = self.crossover(generation, scores)changable_next = self.mutation(offsprings)next_population.extend(changable_next)for i in range(self.n_saved_best_pretrained):if self.train_partition != :file_ext = str(Path(next_population[i][\"\"][\"\"]).suffix)next_population[i][\"\"][\"\"] = \"\".join(Path(next_population[i][\"\"][\"\"]).stem.split(\"\")[:-]) + \"\" + str(iteration % self.train_partition) + file_extif self.elitism_with_weights:self.insert_value_or_dict_into_config(next_population[i], self.path_to_models_load_path,self.get_value_from_config(next_population[i], self.path_to_models_save_path))else:self.insert_value_or_dict_into_config(next_population[i], self.path_to_models_load_path,str(self.models_path / f\"\" / f\"\"))self.insert_value_or_dict_into_config(next_population[i], self.path_to_models_save_path,str(self.models_path / f\"\" / f\"\"))for i in range(self.n_saved_best_pretrained, self.population_size):if self.train_partition != :file_ext = str(Path(next_population[i][\"\"][\"\"]).suffix)next_population[i][\"\"][\"\"] = \"\".join([str(p) for p in Path(next_population[i][\"\"][\"\"]).stem.split(\"\")[:-]])+ \"\" + str(iteration % self.train_partition) + file_extself.insert_value_or_dict_into_config(next_population[i], self.path_to_models_save_path,str(self.models_path / f\"\" / f\"\"))self.insert_value_or_dict_into_config(next_population[i], self.path_to_models_load_path,str(self.models_path / f\"\" / 
f\"\"))next_population[i][\"\"] = self.evolution_model_idself.evolution_model_id += return next_population", "docstring": "Provide replacement\n\nArgs:\n generation: current generation (set of self.population_size configs\n scores: corresponding scores that should be maximized\n iteration: iteration number\n\nReturns:\n the next generation according to the given scores of current generation", "id": "f3049:c0:m2"} {"signature": "def crossover(self, population: List[dict], scores: List[float]) -> List[dict]:", "body": "offsprings = []ranges = self.range_scores(scores)a = / ( - self.population_size)b = self.population_size / (self.population_size - )probas_to_be_parent = (a * ranges + b) / np.sum(a * ranges + b)intervals = np.array([np.sum(probas_to_be_parent[:i]) for i in range(self.population_size)])for i in range(self.population_size - self.n_saved_best_pretrained):rs = np.random.random()parents = population[np.where(rs[] > intervals)[][-]], population[np.where(rs[] > intervals)[][-]]if self.decision(self.p_crossover):params_perm = np.random.permutation(self.n_params)curr_offsprings = [deepcopy(parents[]),deepcopy(parents[])]part = int(self.crossover_power * self.n_params)for j in range(self.n_params - part, self.n_params):self.insert_value_or_dict_into_config(curr_offsprings[],self.paths_to_params[params_perm[j]],self.get_value_from_config(parents[],self.paths_to_params[params_perm[j]]))self.insert_value_or_dict_into_config(curr_offsprings[],self.paths_to_params[params_perm[j]],self.get_value_from_config(parents[],self.paths_to_params[params_perm[j]]))offsprings.append(deepcopy(curr_offsprings[]))else:offsprings.append(deepcopy(parents[]))return offsprings", "docstring": "Recombine randomly population in pairs and cross over them with given probability.\nCross over from two parents produces two offsprings\neach of which contains crossover_power portion of the parameter values from one parent,\n and the other (1 - crossover_power portion) from the other parent\n\nArgs:\n population: self.population_size individuums\n scores: list of corresponding scores\n\nReturns:\n (self.population_size - self.n_saved_best_pretained) offsprings", "id": "f3049:c0:m5"} {"signature": "def __init__(self, load_path: str, wiki_filename: str, entities_filename: str, inverted_index_filename: str,id_to_name_file: str, lemmatize: bool = True, debug: bool = False, rule_filter_entities: bool = True,use_inverted_index: bool = True, language: str = '', *args, **kwargs) -> None:", "body": "super().__init__(save_path=None, load_path=load_path)self.morph = pymorphy2.MorphAnalyzer()self.lemmatize = lemmatizeself.debug = debugself.rule_filter_entities = rule_filter_entitiesself.use_inverted_index = use_inverted_indexself._language = languageif language not in self.LANGUAGES:log.warning(f'')self._wiki_filename = wiki_filenameself._entities_filename = entities_filenameself.inverted_index_filename = inverted_index_filenameself.id_to_name_file = id_to_name_fileself.name_to_q: Optional[Dict[str, List[Tuple[str]]]] = Noneself.wikidata: Optional[Dict[str, List[List[str]]]] = Noneself.inverted_index: Optional[Dict[str, List[Tuple[str]]]] = Noneself.id_to_name: Optional[Dict[str, Dict[List[str]]]] = Noneself.load()if self.use_inverted_index:alphabet = \"\"dictionary_words = list(self.inverted_index.keys())self.searcher = LevenshteinSearcher(alphabet, dictionary_words)", "docstring": "Args:\n load_path: path to folder with wikidata files\n wiki_filename: file with Wikidata triplets\n entities_filename: file with dict of entity 
titles (keys) and entity ids (values)\n inverted_index_filename: file with dict of words (keys) and entities containing these words (values)\n id_to_name_file: file with dict of entity ids (keys) and entities names and aliases (values)\n lemmatize: whether to lemmatize tokens of extracted entity\n debug: whether to print entities extracted from Wikidata\n rule_filter_entities: whether to filter entities which do not fit the question\n use_inverted_index: whether to use inverted index for entity linking\n language: the language of the linker (used for filtration of some questions to improve overall performance)\n *args:\n **kwargs:", "id": "f3051:c0:m0"} {"signature": "def __call__(self, batch: List[List[List[Tuple[float, str]]]]) -> List[List[str]]:", "body": "return [self._infer_instance(candidates) for candidates in batch]", "docstring": "Choose the best candidate for every token\n\n Args:\n batch: batch of probabilities and string values of candidates for every token in a sentence\n\n Returns:\n batch of corrected tokenized sentences", "id": "f3053:c0:m1"} {"signature": "def transduce(self, first, second, threshold):", "body": "add_pred = (lambda x, y: x <= threshold)clear_pred =(lambda x, y: False)update_func = (lambda x, y: min(x, y))costs, backtraces = self._fill_levenshtein_table(first, second,update_func, add_pred, clear_pred,threshold=threshold)result = self._backtraces_to_transductions(first, second,backtraces, threshold, return_cost=True)return result", "docstring": "Returns all transductions that convert first into second\nwhose cost does not exceed threshold\n\nReturns:\n----------\nresult : list\n a list of the form [(transduction, cost)]", "id": "f3056:c1:m4"} {"signature": "def lower_transductions(self, word, max_cost, return_cost=True):", "body": "prefixes = [[] for i in range(len(word) + )]prefixes[].append(((), ))for pos in range(len(prefixes)):prefixes[pos] = self._perform_insertions(prefixes[pos], max_cost)max_upperside_length = min(len(word) - pos, self.max_up_length)for upperside_length in range(, max_upperside_length + ):up = word[pos: pos + upperside_length]for low, low_cost in self.operation_costs.get(up, dict()).items():for transduction, cost in prefixes[pos]:new_cost = cost + low_costif new_cost <= max_cost:new_transduction = transduction +(up, low)prefixes[pos + upperside_length].append((new_transduction, new_cost))answer = sorted(prefixes[-], key=(lambda x: x[]))if return_cost:return answerelse:return [elem[] for elem in answer]", "docstring": "Returns all transductions with upper element word\n whose cost does not exceed max_cost\n\n Returns:\n ----------\n result : list\n a list of the form [(transduction, cost)] if return_cost=True\n a list of transductions if return_cost=False\n the list is sorted in order of increasing transduction cost", "id": "f3056:c1:m5"} {"signature": "def _descend_cashed(self, curr, s):", "body": "if s == \"\":return currcurr_cash = self._descendance_cash[curr]answer = curr_cash.get(s, None)if answer is not None:return answerres = currfor a in s:res = self.graph[res][self.alphabet_codes[a]]if res == Trie.NO_NODE:breakcurr_cash[s] = resreturn res", "docstring": "Descend from the node curr along the string s, with caching", "id": "f3058:c0:m18"} {"signature": "def find_partitions(self, s, max_count=):", "body": "curr_agenda = [(self.root, [], )]for i, a in enumerate(s):next_agenda = []for curr, borders, cost in curr_agenda:if cost >= max_count:continuechild = self.graph[curr][self.alphabet_codes[a]]if child == Trie.NO_NODE:continuenext_agenda.append((child, borders, cost))if self.is_final(child):next_agenda.append((self.root, borders + [i+], cost+))curr_agenda = next_agendaanswer = []for curr, borders, cost in curr_agenda:if curr == self.root:borders = [] + bordersanswer.append([s[left:borders[i+]] for i, left in enumerate(borders[:-])])return answer", "docstring": "Finds all partitions s = s_1 ... s_m into dictionary words s_1, ..., s_m\nfor m <= max_count", "id": "f3058:c0:m12"} {"signature": "def add(self, s):", "body": "if self.is_terminated:raise TypeError(\"\")if s == \"\":self._set_final(self.root)returncurr = self.rootfor i, a in enumerate(s):code = self.alphabet_codes[a]next = self.graph[curr][code]if next == Trie.NO_NODE:curr = self._add_descendant(curr, s[i:])breakelse:curr = nextself._set_final(curr)return self", "docstring": "Adds the string s to the prefix trie", "id": "f3058:c0:m6"} {"signature": "def _set_final(self, curr):", "body": "self.final[curr] = True", "docstring": "Marks the state curr as final", "id": "f3058:c0:m19"} {"signature": "def words(self):", "body": "branch, word, indexes = [self.root], [], []letters_with_children = [self._get_children_and_letters(self.root)]while len(branch) > :if self.is_final(branch[-]):yield \"\".join(word)while indexes[-] == len(letters_with_children[-]):indexes.pop()letters_with_children.pop()branch.pop()if len(indexes) == :raise StopIteration()word.pop()next_letter, next_child = letters_with_children[-][indexes[-]]indexes[-] += indexes.append()word.append(next_letter)branch.append(next_child)letters_with_children.append(self._get_children_and_letters(branch[-]))", "docstring": "Returns an iterator over the words contained in the trie", "id": "f3058:c0:m10"} {"signature": "def process_word(word: str, to_lower: bool = False,append_case: Optional[str] = None) -> Tuple[str]:", "body": "if all(x.isupper() for x in word) and len(word) > :uppercase = \"\"elif word[].isupper():uppercase = \"\"else:uppercase = Noneif to_lower:word = word.lower()if word.isdigit():answer = [\"\"]elif word.startswith(\"\") or word.startswith(\"\"):answer = [\"\"]else:answer = list(word)if to_lower and uppercase is not None:if append_case == \"\":answer = [uppercase] + answerelif append_case == \"\":answer = answer + [uppercase]return tuple(answer)", "docstring": "Converts word to a tuple of symbols, optionally converts it to lowercase\n and adds capitalization label.\n\n Args:\n word: input word\n to_lower: whether to lowercase\n append_case: whether to add case mark\n ('' for first capital and '' for all caps)\n\n Returns:\n a preprocessed word", "id": "f3061:m0"} {"signature": "def _get_idx(self, el: str) -> int:", "body": "for e in (el, el.lower(), el.capitalize(), el.upper()):if e in self.token2idx_dict:return self.token2idx_dict[e]return ", "docstring": "Returns idx for el (token or char).\n\n Args:\n el: token or character\n\n Returns:\n idx in vocabulary", "id": "f3063:c2:m7"} {"signature": "def __call__(self, contexts_raw: Tuple[str, ...], questions_raw: Tuple[str, ...],**kwargs) -> Tuple[List[str], List[List[str]], List[List[List[str]]],List[List[int]], List[List[int]],List[str], List[List[str]], List[List[List[str]]],List[List[Tuple[int, int]]]]:", "body": "contexts = []contexts_tokens = []contexts_chars = []contexts_r2p = []contexts_p2r = []questions = 
[]questions_tokens = []questions_chars = []spans = []for c_raw, q_raw in zip(contexts_raw, questions_raw):c, r2p, p2r = SquadPreprocessor.preprocess_str(c_raw, return_mapping=True)c_tokens = [token.replace(\"\", '').replace(\"\", '') for token in word_tokenize(c)][:self.context_limit]c_chars = [list(token)[:self.char_limit] for token in c_tokens]q = SquadPreprocessor.preprocess_str(q_raw)q_tokens = [token.replace(\"\", '').replace(\"\", '') for token in word_tokenize(q)][:self.question_limit]q_chars = [list(token)[:self.char_limit] for token in q_tokens]contexts.append(c)contexts_tokens.append(c_tokens)contexts_chars.append(c_chars)contexts_r2p.append(r2p)contexts_p2r.append(p2r)questions.append(q)questions_tokens.append(q_tokens)questions_chars.append(q_chars)spans.append(SquadPreprocessor.convert_idx(c, c_tokens))return contexts, contexts_tokens, contexts_chars, contexts_r2p, contexts_p2r,questions, questions_tokens, questions_chars, spans", "docstring": "Performs preprocessing of context and question\n Args:\n contexts_raw: batch of contexts to preprocess\n questions_raw: batch of questions to preprocess\n\n Returns:\n context: batch of processed contexts\n contexts_tokens: batch of tokenized contexts\n contexts_chars: batch of tokenized and split on chars contexts\n contexts_r2p: batch of mappings from raw context to processed context\n contexts_p2r: batch of mappings from procesesd context to raw context\n questions: batch of processed questions\n questions_tokens: batch of tokenized questions\n questions_chars: batch of tokenized and split on chars questions\n spans: batch of mapping tokens to position in context", "id": "f3063:c0:m1"} {"signature": "def __call__(self, batch_docs: List[Union[str, List[str]]]) ->List[Union[List[str], List[List[str]]]]:", "body": "result = []for docs in batch_docs:batch_chunks = []if isinstance(docs, str):docs = [docs]for doc in docs:if self.paragraphs:split_doc = doc.split('')split_doc = [sd.strip() for sd in split_doc]split_doc = list(filter(lambda x: len(x) > , split_doc))batch_chunks.append(split_doc)else:doc_chunks = []if self.keep_sentences:sentences = sent_tokenize(doc)n_tokens = keep = []for s in sentences:n_tokens += len(s.split())if n_tokens > self.tokens_limit:if keep:doc_chunks.append(''.join(keep))n_tokens = keep.clear()keep.append(s)if keep:doc_chunks.append(''.join(keep))batch_chunks.append(doc_chunks)else:split_doc = doc.split()doc_chunks = [split_doc[i:i + self.tokens_limit] for i inrange(, len(split_doc), self.tokens_limit)]batch_chunks.append(doc_chunks)result.append(batch_chunks)if self.flatten_result:if isinstance(result[][], list):for i in range(len(result)):flattened = list(chain.from_iterable(result[i]))result[i] = flattenedreturn result", "docstring": "Make chunks from a batch of documents. 
There can be several documents in each batch.\n Args:\n batch_docs: a batch of documents / a batch of lists of documents\n Returns:\n chunks of docs, flattened or not", "id": "f3066:c0:m1"} {"signature": "def __call__(self, texts_a: List[str], texts_b: Optional[List[str]] = None) -> List[InputFeatures]:", "body": "if texts_b is None:texts_b = [None] * len(texts_a)examples = [InputExample(unique_id=, text_a=text_a, text_b=text_b)for text_a, text_b in zip(texts_a, texts_b)]return convert_examples_to_features(examples, self.max_seq_length, self.tokenizer)", "docstring": "Call Bert convert_examples_to_features function to tokenize and create masks.\n\n texts_a and texts_b are separated by [SEP] token\n\n Args:\n texts_a: list of texts,\n texts_b: list of texts, it could be None, e.g. single sentence classification task\n\n Returns:\n batch of InputFeatures with subtokens, subtoken ids, subtoken mask, segment mask.", "id": "f3067:c0:m1"} {"signature": "def __call__(self, tokens_batch, **kwargs):", "body": "lemma_batch = []for utterance in tokens_batch:lemma_utterance = []for token in utterance:p = self.lemmatizer.parse(token)[]lemma_utterance.append(p.normal_form)lemma_batch.append(lemma_utterance)return lemma_batch", "docstring": "Takes batch of tokens and returns the lemmatized tokens.", "id": "f3070:c0:m1"} {"signature": "def parse_input(self, inp: str) -> Dict[Any, Any]:", "body": "state: List = []for i in range(len(inp.split()) // , , -):state.append([inp.split(None, )[], inp.split(None, )[].split()[]])if i > :inp = inp.split(None, )[]return dict(state)", "docstring": "Convert space-delimited string into dialog state", "id": "f3074:c0:m9"} {"signature": "def spacy2dict(self, doc: spacy.tokens.Doc, fields: List[str] = None) -> List[Dict[Any, Any]]:", "body": "if fields is None:fields = ['', '', '', '']return [{field: getattr(token, field) for field in fields} for token in doc]", "docstring": "Convert SpaCy doc into list of tokens with `fields` properties only", "id": "f3074:c0:m4"} {"signature": "def load(self, fname: str = None) -> None:", "body": "if fname is None:fname = self.load_pathfname = Path(fname).with_suffix('')if fname.exists():log.info(\"\".format(self.model_class, str(fname)))with open(fname, \"\") as f:self.model = pickle.load(f)warm_start = self.model_params.get(\"\", None)self.model_params = {param: getattr(self.model, param) for param in self.get_class_attributes(self.model)}self.model_class = self.model.__module__ + self.model.__class__.__name__log.info(\"\".format(self.model_class))if warm_start and \"\" in self.model_params.keys():self.model_params[\"\"] = Truelog.info(\"\")else:log.warning(\"\"\"\")else:log.warning(\"\".format(str(fname)))self.init_from_scratch()return", "docstring": "Initialize ``self.model`` as some sklearn model from saved re-initializing ``self.model_params`` parameters. 
\\\n If in new given parameters ``warm_start`` is set to True and given model admits ``warm_start`` parameter, \\\n model will be initilized from saved with opportunity to continue fitting.\n\nArgs:\n fname: string name of path to model to load from\n\nReturns:\n None", "id": "f3077:c0:m4"} {"signature": "@staticmethoddef compose_input_data(x: List[Union[Tuple[Union[np.ndarray, list, spmatrix, str]],List[Union[np.ndarray, list, spmatrix, str]],np.ndarray, spmatrix]]) -> Union[spmatrix, np.ndarray]:", "body": "x_features = []for i in range(len(x)):if ((isinstance(x[i], tuple) or isinstance(x[i], list) or isinstance(x[i], np.ndarray) and len(x[i]))or (issparse(x[i]) and x[i].shape[])):if issparse(x[i][]):x_features.append(vstack(list(x[i])))elif isinstance(x[i][], np.ndarray) or isinstance(x[i][], list):x_features.append(np.vstack(list(x[i])))elif isinstance(x[i][], str):x_features.append(np.array(x[i]))else:raise ConfigError('')else:raise ConfigError(\"\")sparse = Falsefor inp in x_features:if issparse(inp):sparse = Trueif sparse:x_features = hstack(list(x_features))else:x_features = np.hstack(list(x_features))return x_features", "docstring": "Stack given list of different types of inputs to the one matrix. If one of the inputs is a sparse matrix, \\\n then output will be also a sparse matrix\n\nArgs:\n x: list of data elements\n\nReturns:\n sparse or dense array of stacked data", "id": "f3077:c0:m6"} {"signature": "def __call__(self, *args):", "body": "x_features = self.compose_input_data(args)try:predictions = self.infer_method(x_features)except TypeError or ValueError:if issparse(x_features):log.info(\"\".format(self.model_class))predictions = self.infer_method(x_features.todense())else:log.info(\"\".format(self.model_class))predictions = self.infer_method(csr_matrix(x_features))if isinstance(predictions, list):predictions_ = [[predictions[j][i][] for j in range(len(predictions))] for i in range(x_features.shape[])]predictions = np.array(predictions_)if self.ensure_list_output and len(predictions.shape) == :predictions = predictions.reshape(-, )if issparse(predictions):return predictionselse:return predictions.tolist()", "docstring": "Infer on the given data according to given in the config infer method, \\\n e.g. ``\"predict\", \"predict_proba\", \"transform\"``\n\nArgs:\n *args: list of inputs\n\nReturns:\n predictions, e.g. 
list of labels, array of probability distribution, sparse array of vectorized samples", "id": "f3077:c0:m2"} {"signature": "@overridesdef save(self, epoch: Optional[int] = None) -> None:", "body": "path = self.save_pathif epoch is not None:path = path.parent / self.epoch_save_path / str(epoch) / path.parts[-]path.resolve()log.info(f'')path.parent.mkdir(parents=True, exist_ok=True)path = str(path)log.info(f'')with self.graph.as_default():saver = tf.train.Saver()saver.save(self.sess, path)", "docstring": "Save model parameters to self.save_path", "id": "f3082:c0:m10"} {"signature": "def destroy(self) -> None:", "body": "if hasattr(self, ''):for k in list(self.sess.graph.get_all_collection_keys()):self.sess.graph.clear_collection(k)super().destroy()", "docstring": "Delete model from memory\n\nReturns:\n None", "id": "f3082:c0:m15"} {"signature": "def _deduplicate_indexed_slices(values, indices):", "body": "unique_indices, new_index_positions = tf.unique(indices)summed_values = tf.unsorted_segment_sum(values,new_index_positions,tf.shape(unique_indices)[])return (summed_values, unique_indices)", "docstring": "Sums `values` associated with any non-unique `indices`.\n Args:\n values: A `Tensor` with rank >= 1.\n indices: A one-dimensional integer `Tensor`, indexing into the first\n dimension of `values` (as in an IndexedSlices object).\n Returns:\n A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a\n de-duplicated version of `indices` and `summed_values` contains the sum of\n `values` slices associated with each unique index.", "id": "f3083:m2"} {"signature": "def make_module_spec(options, weight_file):", "body": "def module_fn():\"\"\"\"\"\"_bos_id = _eos_id = _bow_id = _eow_id = _pad_id = _max_word_length = _parallel_iterations = _max_batch_size = id_dtype = tf.int32id_nptype = np.int32max_word_length = tf.constant(_max_word_length, dtype=id_dtype, name='')version = tf.constant('', dtype=tf.string, name='')def _make_bos_eos(c):r = np.zeros([_max_word_length], dtype=id_nptype)r[:] = _pad_idr[] = _bow_idr[] = cr[] = _eow_idreturn tf.constant(r, dtype=id_dtype)bos_ids = _make_bos_eos(_bos_id)eos_ids = _make_bos_eos(_eos_id)def token2ids(token):with tf.name_scope(\"\"):char_ids = tf.decode_raw(token, tf.uint8, name='')char_ids = tf.cast(char_ids, tf.int32, name='')char_ids = tf.strided_slice(char_ids, [], [max_word_length - ],[], name='')ids_num = tf.shape(char_ids)[]fill_ids_num = (_max_word_length - ) - ids_numpads = tf.fill([fill_ids_num], _pad_id)bow_token_eow_pads = tf.concat([[_bow_id], char_ids, [_eow_id], pads],, name='')return bow_token_eow_padsdef sentence_tagging_and_padding(sen_dim):with tf.name_scope(\"\"):sen = sen_dim[]dim = sen_dim[]extra_dim = tf.shape(sen)[] - dimsen = tf.slice(sen, [, ], [dim, max_word_length], name='')bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]], , name='')bos_sen_eos_plus_one = bos_sen_eos + bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one, [[, extra_dim], [, ]],\"\", name='')return bos_sen_eos_padstokens = tf.placeholder(shape=(None, None), dtype=tf.string, name='')sequence_len = tf.placeholder(shape=(None, ), dtype=tf.int32, name='')tok_shape = tf.shape(tokens)line_tokens = tf.reshape(tokens, shape=[-], name='')with tf.device(''):tok_ids = tf.map_fn(token2ids,line_tokens,dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,name='')tok_ids = tf.reshape(tok_ids, [tok_shape[], tok_shape[], -], name='')with tf.device(''):sen_ids = tf.map_fn(sentence_tagging_and_padding,(tok_ids, sequence_len),dtype=tf.int32, 
back_prop=False, parallel_iterations=_parallel_iterations,name='')bilm = BidirectionalLanguageModel(options, str(weight_file),max_batch_size=_max_batch_size)embeddings_op = bilm(sen_ids)elmo_output = weight_layers('', embeddings_op, l2_coef=)weighted_op = elmo_output['']mean_op = elmo_output['']word_emb = elmo_output['']lstm_outputs1 = elmo_output['']lstm_outputs2 = elmo_output['']hub.add_signature(\"\", {\"\": tokens, \"\": sequence_len},{\"\": weighted_op,\"\": mean_op,\"\": word_emb,\"\": lstm_outputs1,\"\": lstm_outputs2,\"\": version})def_strings = tf.placeholder(shape=(None), dtype=tf.string)def_tokens_sparse = tf.string_split(def_strings)def_tokens_dense = tf.sparse_to_dense(sparse_indices=def_tokens_sparse.indices,output_shape=def_tokens_sparse.dense_shape,sparse_values=def_tokens_sparse.values,default_value='')def_mask = tf.not_equal(def_tokens_dense, '')def_int_mask = tf.cast(def_mask, dtype=tf.int32)def_sequence_len = tf.reduce_sum(def_int_mask, axis=-)def_tok_shape = tf.shape(def_tokens_dense)def_line_tokens = tf.reshape(def_tokens_dense, shape=[-], name='')with tf.device(''):def_tok_ids = tf.map_fn(token2ids,def_line_tokens,dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,name='')def_tok_ids = tf.reshape(def_tok_ids, [def_tok_shape[], def_tok_shape[], -], name='')with tf.device(''):def_sen_ids = tf.map_fn(sentence_tagging_and_padding,(def_tok_ids, def_sequence_len),dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,name='')def_embeddings_op = bilm(def_sen_ids)def_elmo_output = weight_layers('', def_embeddings_op, l2_coef=, reuse=True)def_weighted_op = def_elmo_output['']def_mean_op = def_elmo_output['']def_word_emb = def_elmo_output['']def_lstm_outputs1 = def_elmo_output['']def_lstm_outputs2 = def_elmo_output['']hub.add_signature(\"\", {\"\": def_strings},{\"\": def_weighted_op,\"\": def_mean_op,\"\": def_word_emb,\"\": def_lstm_outputs1,\"\": def_lstm_outputs2,\"\": version})return hub.create_module_spec(module_fn)", "docstring": "Makes a module spec.\n\n Args:\n options: LM hyperparameters.\n weight_file: location of the hdf5 file with LM weights.\n\n Returns:\n A module spec object used for constructing a TF-Hub module.", "id": "f3085:m0"} {"signature": "def __init__(self,options: dict,weight_file: str,use_character_inputs=True,embedding_weight_file=None,max_batch_size=):", "body": "if not use_character_inputs:if embedding_weight_file is None:raise ValueError(\"\"\"\")self._options = optionsself._weight_file = weight_fileself._embedding_weight_file = embedding_weight_fileself._use_character_inputs = use_character_inputsself._max_batch_size = max_batch_sizeself._ops = {}self._graphs = {}", "docstring": "Creates the language model computational graph and loads weights\n\nTwo options for input type:\n (1) To use character inputs (paired with Batcher)\n pass use_character_inputs=True, and ids_placeholder\n of shape (None, None, max_characters_per_token)\n to __call__\n (2) To use token ids as input (paired with TokenBatcher),\n pass use_character_inputs=False and ids_placeholder\n of shape (None, None) to __call__.\n In this case, embedding_weight_file is also required input\n\noptions_file: location of the json formatted file with\n LM hyperparameters\nweight_file: location of the hdf5 file with LM weights\nuse_character_inputs: if True, then use character ids as input,\n otherwise use token ids\nmax_batch_size: the maximum allowable batch size", "id": "f3086:c0:m0"} {"signature": "def get_counts(self, docs: List[str], doc_ids: 
List[Any])-> Generator[Tuple[KeysView, ValuesView, List[int]], Any, None]:", "body": "logger.info(\"\")batch_ngrams = list(self.tokenizer(docs))logger.info(\"\")doc_id = iter(doc_ids)for ngrams in batch_ngrams:counts = Counter([hash_(gram, self.hash_size) for gram in ngrams])hashes = counts.keys()values = counts.values()_id = self.doc_index[next(doc_id)]if values:col_id = [_id] * len(values)else:col_id = []yield hashes, values, col_id", "docstring": "Get term counts for a list of documents.\n\n Args:\n docs: a list of input documents\n doc_ids: a list of document ids corresponding to input documents\n\n Yields:\n a tuple of term hashes, count values and column ids\n\n Returns:\n None", "id": "f3087:c0:m3"} {"signature": "def load(self) -> None:", "body": "if not isinstance(self.load_path, list):self.load_path = [self.load_path]for i, path in enumerate(self.load_path):if isinstance(path, str):self.load_path[i] = pathlib.Path(path)labels_by_words = defaultdict(set)for infile in self.load_path:with infile.open(\"\", encoding=\"\") as fin:for line in fin:line = line.strip()if line.count(\"\") != :continueword, labels = line.split(\"\")labels_by_words[word].update(labels.split())self._initialize(labels_by_words)", "docstring": "Loads the dictionary from self.load_path", "id": "f3088:c1:m3"} {"signature": "def __call__(self, data: List) -> np.ndarray:", "body": "max_length = max(len(x) for x in data)answer = np.zeros(shape=(len(data), max_length, self.dim), dtype=int)for i, sent in enumerate(data):for j, word in enumerate(sent):answer[i, j][self._get_word_indexes(word)] = return answer", "docstring": "Transforms words to one-hot encoding according to the dictionary.\n\nArgs:\n data: the batch of words\n\nReturns:\n a 3D array. answer[i][j][k] = 1 iff data[i][j] is the k-th word in the dictionary.", "id": "f3088:c0:m3"} {"signature": "def __call__(self, input_doc_ids: List[List[Any]], input_doc_scores: List[List[float]]) ->Tuple[List[List], List[List]]:", "body": "batch_ids = []batch_scores = []for instance_ids, instance_scores in zip(input_doc_ids, input_doc_scores):instance_probas = []for idx, score in zip(instance_ids, instance_scores):pop = self.pop_dict.get(idx, self.mean_pop)features = [score, pop, score * pop]prob = self.clf.predict_proba([features])instance_probas.append(prob[][])sort = sorted(enumerate(instance_probas), key=itemgetter(), reverse=True)sorted_probas = [item[] for item in sort]sorted_ids = [instance_ids[item[]] for item in sort]if self.active:sorted_ids = sorted_ids[:self.top_n]sorted_probas = sorted_probas[:self.top_n]batch_ids.append(sorted_ids)batch_scores.append(sorted_probas)return batch_ids, batch_scores", "docstring": "Get tfidf scores and tfidf ids, re-rank them by applying logistic regression classifier,\n output pop ranker ids and pop ranker scores.\n\n Args:\n input_doc_ids: top input doc ids of tfidf ranker\n input_doc_scores: top input doc scores of tfidf ranker corresponding to doc ids\n\n Returns:\n top doc ids of pop ranker and their corresponding scores", "id": "f3093:c0:m1"} {"signature": "def __getitem__(self, key):", "body": "if isinstance(key, str):return self.act2templ[key]elif isinstance(key, Template):return self.templ2act[key]", "docstring": "If key is an str, returns corresponding template.\n If key is a Template, return corresponding action.\n If does not exist, return None.", "id": "f3094:c3:m2"} {"signature": "def __hash__(self):", "body": "return hash(self.text)", "docstring": "Override the default hash behavior (that returns the id)", "id": 
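The `get_counts` record above builds sparse document counts by hashing every n-gram into a fixed-size bucket space with `Counter`. A rough sketch of that hashing-trick counting; the `bucket` helper and `HASH_SIZE` here are illustrative assumptions, not DeepPavlov's `hash_`:

```python
from collections import Counter
from hashlib import md5

HASH_SIZE = 2 ** 24  # illustrative bucket count

def bucket(gram: str) -> int:
    # stable hash of an n-gram folded into HASH_SIZE buckets
    return int(md5(gram.encode("utf-8")).hexdigest(), 16) % HASH_SIZE

def term_counts(ngrams, doc_id):
    counts = Counter(bucket(g) for g in ngrams)
    rows = list(counts.keys())    # hashed term ids
    data = list(counts.values())  # term frequencies
    cols = [doc_id] * len(data)   # one column id per document
    return rows, data, cols

print(term_counts(["deep pavlov", "pavlov qa", "deep pavlov"], doc_id=0))
```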
"f3094:c1:m5"} {"signature": "@abstractmethoddef get_features(self) -> np.ndarray:", "body": "pass", "docstring": "Returns:\n np.ndarray[float]: numpy array with calculates state features.", "id": "f3095:c0:m3"} {"signature": "def cnn_model_max_and_aver_pool(self, kernel_sizes_cnn: List[int], filters_cnn: int, dense_size: int,coef_reg_cnn: float = , coef_reg_den: float = , dropout_rate: float = ,input_projection_size: Optional[int] = None, **kwargs) -> Model:", "body": "inp = Input(shape=(self.opt[''], self.opt['']))output = inpif input_projection_size is not None:output = Dense(input_projection_size, activation='')(output)outputs = []for i in range(len(kernel_sizes_cnn)):output_i = Conv1D(filters_cnn, kernel_size=kernel_sizes_cnn[i],activation=None,kernel_regularizer=l2(coef_reg_cnn),padding='')(output)output_i = BatchNormalization()(output_i)output_i = Activation('')(output_i)output_i_0 = GlobalMaxPooling1D()(output_i)output_i_1 = GlobalAveragePooling1D()(output_i)output_i = Concatenate()([output_i_0, output_i_1])outputs.append(output_i)output = concatenate(outputs, axis=)output = Dropout(rate=dropout_rate)(output)output = Dense(dense_size, activation=None,kernel_regularizer=l2(coef_reg_den))(output)output = BatchNormalization()(output)output = Activation('')(output)output = Dropout(rate=dropout_rate)(output)output = Dense(self.n_classes, activation=None,kernel_regularizer=l2(coef_reg_den))(output)output = BatchNormalization()(output)act_output = Activation(self.opt.get(\"\", \"\"))(output)model = Model(inputs=inp, outputs=act_output)return model", "docstring": "Build un-compiled model of shallow-and-wide CNN where average pooling after convolutions is replaced with\nconcatenation of average and max poolings.\n\nArgs:\n kernel_sizes_cnn: list of kernel sizes of convolutions.\n filters_cnn: number of filters for convolutions.\n dense_size: number of units for dense layer.\n coef_reg_cnn: l2-regularization coefficient for convolutions. Default: ``0.0``.\n coef_reg_den: l2-regularization coefficient for dense layers. Default: ``0.0``.\n dropout_rate: dropout rate used after convolutions and between dense layers. Default: ``0.0``.\n input_projection_size: if not None, adds Dense layer (with ``relu`` activation)\n right after input layer to the size ``input_projection_size``.\n Useful for input dimentionaliry recuction. 
Default: ``None``.\n kwargs: other non-used parameters\n\nReturns:\n keras.models.Model: uncompiled instance of Keras Model", "id": "f3104:c0:m14"} {"signature": "def pad_texts(self, sentences: List[List[np.ndarray]]) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:", "body": "pad = np.zeros(self.opt[''])cutted_batch = [sen[:self.opt['']] for sen in sentences]if self.opt[\"\"] == \"\":cutted_batch = [[pad] * (self.opt[''] - len(tokens)) + list(tokens) for tokens in cutted_batch]elif self.opt[\"\"] == \"\":cutted_batch = [list(tokens) + [pad] * (self.opt[''] - len(tokens)) for tokens in cutted_batch]else:raise ConfigError(\"\".format(self.opt['']))return np.asarray(cutted_batch)", "docstring": "Cut and pad tokenized texts to self.opt[\"text_size\"] tokens\n\nArgs:\n sentences: list of lists of tokens\n\nReturns:\n array of embedded texts", "id": "f3104:c0:m2"} {"signature": "def train_on_batch(self, texts: List[List[np.ndarray]], labels: list) -> Union[float, List[float]]:", "body": "features = self.check_input(texts)metrics_values = self.model.train_on_batch(features, np.squeeze(np.array(labels)))return metrics_values", "docstring": "Train the model on the given batch\n\nArgs:\n texts: list of tokenized embedded text samples\n labels: list of labels\n\nReturns:\n metrics values on the given batch", "id": "f3104:c0:m4"} {"signature": "def check_input(self, texts: List[List[np.ndarray]]) -> np.ndarray:", "body": "if self.opt[\"\"] is not None:features = self.pad_texts(texts)else:if len(texts[]):features = np.array(texts)else:features = np.zeros((, , self.opt[\"\"]))return features", "docstring": "Check and convert input to array of tokenized embedded samples\n\nArgs:\n texts: list of tokenized embedded text samples\n\nReturns:\n array of tokenized embedded texts samples that are cut and padded", "id": "f3104:c0:m3"} {"signature": "def __call__(self, data: Union[np.ndarray, List[List[float]], List[List[int]]],*args, **kwargs) -> Union[List[List[str]], List[str]]:", "body": "if self.confident_threshold:return [list(np.where(np.array(d) > self.confident_threshold)[])for d in data]elif self.max_proba:return [[np.argmax(d)] for d in data]elif self.top_n:return [np.argsort(d)[::-][:self.top_n] for d in data]else:raise ConfigError(\"\"\"\")", "docstring": "Process probabilities to labels\n\nArgs:\n data: list of vectors with probability distribution\n *args:\n **kwargs:\n\nReturns:\n list of labels (only label classification) or list of lists of labels (multi-label classification)", "id": "f3105:c0:m1"} {"signature": "def train_on_batch(self, c_tokens: np.ndarray, c_chars: np.ndarray, q_tokens: np.ndarray, q_chars: np.ndarray,y1s: Tuple[List[int], ...], y2s: Tuple[List[int], ...]) -> float:", "body": "y1s = np.array([x[] for x in y1s])y2s = np.array([x[] for x in y2s])if self.noans_token:noans_mask = ((y1s != -) * (y2s != -))y1s = (y1s + ) * noans_masky2s = (y2s + ) * noans_maskfeed_dict = self._build_feed_dict(c_tokens, c_chars, q_tokens, q_chars, y1s, y2s)loss, _, lear_rate = self.sess.run([self.loss, self.train_op, self.lear_rate_ph],feed_dict=feed_dict)report = {'': loss, '': float(lear_rate), '': self.get_momentum()}return report", "docstring": "This method is called by trainer to make one training step on one batch.\n\nArgs:\n c_tokens: batch of tokenized contexts\n c_chars: batch of tokenized contexts, each token split on chars\n q_tokens: batch of tokenized questions\n q_chars: batch of tokenized questions, each token split on chars\n y1s: batch of ground truth answer start positions\n 
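The `Proba2Labels` call above turns probability vectors into label indices by threshold, argmax, or top-n. A tiny numpy illustration of the three modes with made-up values:

```python
import numpy as np

probs = np.array([0.05, 0.60, 0.35])

above_threshold = list(np.where(probs > 0.3)[0])  # confident_threshold mode -> [1, 2]
most_probable = [int(np.argmax(probs))]           # max_proba mode -> [1]
top_2 = list(np.argsort(probs)[::-1][:2])         # top_n mode -> [1, 2]

print(above_threshold, most_probable, top_2)
```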
y2s: batch of ground truth answer end positions\n\nReturns:\n value of loss function on batch", "id": "f3108:c0:m5"} {"signature": "def attention(inputs, state, att_size, mask, scope=\"\"):", "body": "with tf.variable_scope(scope):u = tf.concat([tf.tile(tf.expand_dims(state, axis=), [, tf.shape(inputs)[], ]), inputs], axis=)logits = tf.layers.dense(tf.layers.dense(u, att_size, activation=tf.nn.tanh), , use_bias=False)logits = softmax_mask(tf.squeeze(logits, []), mask)att_weights = tf.expand_dims(tf.nn.softmax(logits), axis=)res = tf.reduce_sum(att_weights * inputs, axis=)return res, logits", "docstring": "Computes weighted sum of inputs conditioned on state", "id": "f3109:m2"} {"signature": "def dot_attention(inputs, memory, mask, att_size, keep_prob=, scope=\"\"):", "body": "with tf.variable_scope(scope):BS, IL, IH = tf.unstack(tf.shape(inputs))BS, ML, MH = tf.unstack(tf.shape(memory))d_inputs = tf.nn.dropout(inputs, keep_prob=keep_prob, noise_shape=[BS, , IH])d_memory = tf.nn.dropout(memory, keep_prob=keep_prob, noise_shape=[BS, , MH])with tf.variable_scope(\"\"):inputs_att = tf.layers.dense(d_inputs, att_size, use_bias=False, activation=tf.nn.relu)memory_att = tf.layers.dense(d_memory, att_size, use_bias=False, activation=tf.nn.relu)logits = tf.matmul(inputs_att, tf.transpose(memory_att, [, , ])) / (att_size ** )mask = tf.tile(tf.expand_dims(mask, axis=), [, IL, ])att_weights = tf.nn.softmax(softmax_mask(logits, mask))outputs = tf.matmul(att_weights, memory)res = tf.concat([inputs, outputs], axis=)with tf.variable_scope(\"\"):dim = res.get_shape().as_list()[-]d_res = tf.nn.dropout(res, keep_prob=keep_prob, noise_shape=[BS, , IH + MH])gate = tf.layers.dense(d_res, dim, use_bias=False, activation=tf.nn.sigmoid)return res * gate", "docstring": "Computes attention vector for each item in inputs:\n attention vector is a weighted sum of memory items.\n Dot product between input and memory vector is used as similarity measure.\n\n Gate mechanism is applied to attention vectors to produce output.\n\n Args:\n inputs: Tensor [batch_size x input_len x feature_size]\n memory: Tensor [batch_size x memory_len x feature_size]\n mask: inputs mask\n att_size: hidden size of attention\n keep_prob: dropout keep_prob\n scope:\n\n Returns:\n attention vectors [batch_size x input_len x (feature_size + feature_size)]", "id": "f3109:m0"} {"signature": "@overridesdef __call__(self, batch: List[List[str]],*args, **kwargs) -> Union[List[np.ndarray], np.ndarray]:", "body": "if len(batch) > self.mini_batch_size:batch_gen = chunk_generator(batch, self.mini_batch_size)elmo_output_values = []for mini_batch in batch_gen:mini_batch_out = self._mini_batch_fit(mini_batch, *args, **kwargs)elmo_output_values.extend(mini_batch_out)else:elmo_output_values = self._mini_batch_fit(batch, *args, **kwargs)if self.pad_zero:elmo_output_values = zero_pad(elmo_output_values)return elmo_output_values", "docstring": "Embed sentences from a batch.\n\nArgs:\n batch: A list of tokenized text samples.\n\nReturns:\n A batch of ELMo embeddings.", "id": "f3113:c0:m5"} {"signature": "def _tags_encode(self, tokens: List[str], tags: List[str], mean: bool) -> Union[List[np.ndarray], np.ndarray]:", "body": "embedded_tokens = np.array(self.embedder([tokens]))[, :, :]tags_weights = np.array([self.tags_vocab.get(tag, ) for tag in tags])detokenized_sample = self.tokenizer([tokens])[] vectorized_sample = self.vectorizer([detokenized_sample]) if self.vectorizer:weights = np.array([vectorized_sample[, np.where(self.vocabulary == token)[][]]if 
len(np.where(self.vocabulary == token)[]) else for token in tokens])else:weights = np.array([self.get_weight(max(self.counter_vocab.get(token, ), self.idf_base_count))for token in tokens])weights = np.multiply(weights, tags_weights)if sum(weights) == :weights = np.ones(len(tokens))if mean is None:mean = self.meanif mean:embedded_tokens = np.average(embedded_tokens, weights=weights, axis=)else:embedded_tokens = np.array([weights[i] * embedded_tokens[i] for i in range(len(tokens))])return embedded_tokens", "docstring": "Embed one text sample\n\nArgs:\n tokens: tokenized text sample\n tags: tokenized tags sample\n mean: whether to return mean token embedding (does not depend on self.mean)\n\nReturns:\n list of embedded tokens or array of mean values", "id": "f3114:c0:m7"} {"signature": "@overridesdef __call__(self, batch: List[List[str]], tags_batch: Optional[List[List[str]]] = None, mean: bool = None,*args, **kwargs) -> List[Union[list, np.ndarray]]:", "body": "if self.tags_vocab:if tags_batch is None:raise ConfigError(\"\")batch = [self._tags_encode(sample, tags_sample, mean=mean) for sample, tags_sample in zip(batch, tags_batch)]else:if tags_batch:raise ConfigError(\"\")batch = [self._encode(sample, mean=mean) for sample in batch]if self.pad_zero:batch = zero_pad(batch)return batch", "docstring": "Infer on the given data\n\nArgs:\n batch: tokenized text samples\n tags_batch: optional batch of corresponding tags\n mean: whether to return mean token embedding (does not depend on self.mean)\n *args: additional arguments\n **kwargs: additional arguments\n\nReturns:", "id": "f3114:c0:m4"} {"signature": "@staticmethoddef space_detokenizer(batch: List[List[str]]) -> List[str]:", "body": "return [\"\".join(tokens) for tokens in batch]", "docstring": "Detokenizer by default. 
Linking tokens by space symbol\n\nArgs:\n batch: batch of tokenized texts\n\nReturns:\n batch of detokenized texts", "id": "f3114:c0:m3"} {"signature": "def _encode(self, tokens: List[str], mean: bool) -> Union[List[np.ndarray], np.ndarray]:", "body": "if self.vectorizer:detokenized_sample = self.tokenizer([tokens])[] vectorized_sample = self.vectorizer([detokenized_sample]) weights = np.array([vectorized_sample[, np.where(self.vocabulary == token)[][]]if len(np.where(self.vocabulary == token)[]) else for token in tokens])else:weights = np.array([self.get_weight(max(self.counter_vocab.get(token, ), self.idf_base_count))for token in tokens])if sum(weights) == :weights = np.ones(len(tokens))embedded_tokens = np.array(self.embedder([tokens]))[, :, :]if mean is None:mean = self.meanif mean:embedded_tokens = np.average(embedded_tokens, weights=weights, axis=)else:embedded_tokens = np.array([weights[i] * embedded_tokens[i] for i in range(len(tokens))])return embedded_tokens", "docstring": "Embed one text sample\n\nArgs:\n tokens: tokenized text sample\n mean: whether to return mean token embedding (does not depend on self.mean)\n\nReturns:\n list of embedded tokens or array of mean values", "id": "f3114:c0:m5"} {"signature": "def load(self) -> None:", "body": "log.info(f\"\")if not self.load_path.exists():log.warning(f'')returnself.model = KeyedVectors.load_word2vec_format(str(self.load_path))self.dim = self.model.vector_size", "docstring": "Load dict of embeddings from given file", "id": "f3115:c0:m1"} {"signature": "def __call__(self, batch: List[str]) -> List[List[str]]:", "body": "if isinstance(batch, (list, tuple)):return [sample.split() for sample in batch]else:raise NotImplementedError('''')", "docstring": "Tokenize given batch\n\nArgs:\n batch: list of texts to tokenize\n\nReturns:\n tokenized batch", "id": "f3121:c0:m1"} {"signature": "def __call__(self, batch: List[Union[str, List[str]]]) -> List[Union[List[str], str]]:", "body": "if isinstance(batch[], str):return [self.tokenizer.tokenize(line, escape=self.escape) for line in batch]else:return [self.detokenizer.detokenize(line, return_str=True, unescape=self.escape)for line in batch]", "docstring": "Tokenize given batch of strings or detokenize given batch of lists of tokens\n\n Args:\n batch: list of text samples or list of lists of tokens\n\n Returns:\n list of lists of tokens or list of text samples", "id": "f3124:c0:m1"} {"signature": "def detokenize(tokens):", "body": "text = ''.join(tokens)step0 = text.replace('', '')step1 = step0.replace(\"\", '').replace(\"\", '')step2 = step1.replace(\"\", \"\").replace(\"\", \"\")step3 = re.sub(r'', r\"\", step2)step4 = re.sub(r'', r\"\", step3)step5 = step4.replace(\"\", \"\").replace(\"\", \"\").replace(\"\", \"\").replace(\"\", \"\")step6 = step5.replace(\"\", \"\")return step6.strip()", "docstring": "Detokenizing a text undoes the tokenizing operation, restores\npunctuation and spaces to the places that people expect them to be.\nIdeally, `detokenize(tokenize(text))` should be identical to `text`,\nexcept for line breaks.", "id": "f3126:m0"} {"signature": "def main():", "body": "args = parser.parse_args()run_ms_bot_framework_server(agent_generator=make_agent,app_id=args.ms_id,app_secret=args.ms_secret,stateful=True)", "docstring": "Parse parameters and run ms bot framework", "id": "f3132:m4"} {"signature": "def show_details(item_data: Dict[Any, Any]) -> str:", "body": "txt = \"\"for key, value in item_data.items():txt += \"\" + str(key) + \"\" + '' + str(value) + \"\"return txt", 
"docstring": "Format catalog item output\n\n Parameters:\n item_data: item's attributes values\n\n Returns:\n [rich_message]: list of formatted rich message", "id": "f3132:m2"} {"signature": "def json(self) -> dict:", "body": "self.control_json[''] = self.contentreturn self.control_json", "docstring": "Returns json compatible state of the PlainText instance.\n\n Returns:\n control_json: Json representation of PlainText state.", "id": "f3136:c0:m2"} {"signature": "def get_chainer(self) -> Chainer:", "body": "self._load()return self._chainer", "docstring": "Return a :class:`~deeppavlov.core.common.chainer.Chainer` built from ``self.chainer_config`` for inference", "id": "f3141:c0:m3"} {"signature": "def add_control(self, control: RichControl):", "body": "self.controls.append(control)", "docstring": "Adds RichControl instance to RichMessage.\n\n Args:\n control: RichControl instance.", "id": "f3143:c2:m2"} {"signature": "def telegram(self):", "body": "return None", "docstring": "Returns Telegram compatible state of the control instance\n including its nested controls.\n\n Returns:\n control: Telegram representation of control state.", "id": "f3143:c0:m2"} {"signature": "def alexa(self) -> list:", "body": "alexa_controls = [control.alexa() for control in self.controls]return alexa_controls", "docstring": "Returns list of Amazon Alexa compatible states of the RichMessage\n instance nested controls.\n\n Returns:\n alexa_controls: Amazon Alexa representation of RichMessage instance nested\n controls.", "id": "f3143:c2:m6"} {"signature": "@abstractmethoddef json(self) -> Union[list, dict]:", "body": "pass", "docstring": "Returns json compatible state of the control instance including\n its nested controls.\n\n Returns:\n control: Json representation of control state.", "id": "f3143:c0:m0"} {"signature": "def json(self) -> list:", "body": "json_controls = [control.json() for control in self.controls]return json_controls", "docstring": "Returns list of json compatible states of the RichMessage instance\n nested controls.\n\n Returns:\n json_controls: Json representation of RichMessage instance\n nested controls.", "id": "f3143:c2:m3"} {"signature": "def _log(self, utterance: Any, direction: str, dialog_id: Optional[Hashable]=None):", "body": "if isinstance(utterance, str):passelif isinstance(utterance, RichMessage):utterance = utterance.json()elif isinstance(utterance, (list, dict)):utterance = jsonify_data(utterance)else:utterance = str(utterance)dialog_id = str(dialog_id) if not isinstance(dialog_id, str) else dialog_idif self.log_file.tell() >= self.log_max_size * :self.log_file.close()self.log_file = self._get_log_file()else:try:log_msg = {}log_msg[''] = self._get_timestamp_utc_str()log_msg[''] = dialog_idlog_msg[''] = directionlog_msg[''] = utterancelog_str = json.dumps(log_msg, ensure_ascii=self.config[''])self.log_file.write(f'')except IOError:log.error('')", "docstring": "Logs single dialog utterance to current dialog log file.\n\n Args:\n utterance: Dialog utterance.\n direction: 'in' or 'out' utterance direction.\n dialog_id: Dialog ID.", "id": "f3144:c0:m3"} {"signature": "def log_in(self, utterance: Any, dialog_id: Optional[Hashable] = None) -> None:", "body": "if self.enabled:self._log(utterance, '', dialog_id)", "docstring": "Wraps _log method for all input utterances.\n Args:\n utterance: Dialog utterance.\n dialog_id: Dialog ID.", "id": "f3144:c0:m4"} {"signature": "@abstractmethoddef _call(self, utterances_batch: list, utterances_ids: Optional[list] = None) -> list:", "body": 
"pass", "docstring": "Processes batch of utterances and returns corresponding responses batch.\n\n Each call of Agent processes incoming utterances and returns response\n for each utterance Batch of dialog IDs can be provided, in other case\n utterances indexes in incoming batch are used as dialog IDs.\n\n Args:\n utterances_batch: Batch of incoming utterances.\n utterances_ids: Batch of dialog IDs corresponding to incoming utterances.\n\n Returns:\n responses: A batch of responses corresponding to the\n utterance batch received by agent.", "id": "f3145:c0:m2"} {"signature": "@staticmethoddef print_number_of_parameters():", "body": "log.info('')variables = tf.trainable_variables()blocks = defaultdict(int)for var in variables:block_name = var.name.split('')[]number_of_parameters = np.prod(var.get_shape().as_list())blocks[block_name] += number_of_parametersfor block_name, cnt in blocks.items():log.info(\"\".format(block_name, cnt))total_num_parameters = np.sum(list(blocks.values()))log.info(''.format(total_num_parameters))", "docstring": "Print number of *trainable* parameters in the network", "id": "f3150:c0:m8"} {"signature": "def get_train_op(self,loss,learning_rate,optimizer=None,clip_norm=None,learnable_scopes=None,optimizer_scope_name=None,**kwargs):", "body": "if optimizer_scope_name is None:opt_scope = tf.variable_scope('')else:opt_scope = tf.variable_scope(optimizer_scope_name)with opt_scope:if learnable_scopes is None:variables_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)else:variables_to_train = []for scope_name in learnable_scopes:variables_to_train.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name))if optimizer is None:optimizer = tf.train.AdamOptimizerextra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)with tf.control_dependencies(extra_update_ops):def clip_if_not_none(grad):if grad is not None:return tf.clip_by_norm(grad, clip_norm)opt = optimizer(learning_rate, **kwargs)grads_and_vars = opt.compute_gradients(loss, var_list=variables_to_train)if clip_norm is not None:grads_and_vars = [(clip_if_not_none(grad), var)for grad, var in grads_and_vars]train_op = opt.apply_gradients(grads_and_vars)return train_op", "docstring": "Get train operation for given loss\n\nArgs:\n loss: loss, tf tensor or scalar\n learning_rate: scalar or placeholder.\n clip_norm: clip gradients norm by clip_norm.\n learnable_scopes: which scopes are trainable (None for all).\n optimizer: instance of tf.train.Optimizer, default Adam.\n **kwargs: parameters passed to tf.train.Optimizer object\n (scalars or placeholders).\n\nReturns:\n train_op", "id": "f3150:c0:m7"} {"signature": "def load(self) -> None:", "body": "if self.load_path.exists():path = str(self.load_path.resolve())log.info(''.format(path))self._net.load(path)", "docstring": "Checks existence of the model file, loads the model if the file exists", "id": "f3152:c1:m1"} {"signature": "@abstractmethoddef get_optimizer(self):", "body": "pass", "docstring": "Return instance of keras optimizer\n\nArgs:\n None", "id": "f3152:c2:m1"} {"signature": "def __call__(self, *x_batch, **kwargs) -> Union[List, np.ndarray]:", "body": "with self.graph.as_default():K.set_session(self.sess)return self._net.predict_on_batch(x_batch, **kwargs)", "docstring": "Predicts answers on batch elements.\n\nArgs:\n instance: a batch to predict answers on", "id": "f3152:c1:m4"} {"signature": "def process_event(self, event_name: str, data: dict):", "body": "if (isinstance(self.opt.get(\"\", None), float) 
andisinstance(self.opt.get(\"\", None), float)):passelse:if event_name == '':if (self.get_learning_rate_variable() is not None) and ('' not in data):data[''] = float(K.get_value(self.get_learning_rate_variable()))if (self.get_momentum_variable() is not None) and ('' not in data):data[''] = float(K.get_value(self.get_momentum_variable()))else:super().process_event(event_name, data)", "docstring": "Process event after epoch\nArgs:\n event_name: whether event is send after epoch or batch.\n Set of values: ``\"after_epoch\", \"after_batch\"``\n data: event data (dictionary)\n\nReturns:\n None", "id": "f3152:c2:m7"} {"signature": "def save(self) -> None:", "body": "path = str(self.save_path.absolute())log.info(''.format(path))self._net.save(path)", "docstring": "Saves model to the save_path, provided in config. The directory is\n already created by super().__init__, which is called in __init__ of this class", "id": "f3152:c1:m2"} {"signature": "def get_learning_rate_variable(self):", "body": "return self._lr_var", "docstring": "Return current learning rate variable\n\nReturns:\n learning rate variable", "id": "f3155:c2:m5"} {"signature": "def __init__(self,learning_rate: Union[None, float, Tuple[float, float]] = None,learning_rate_decay: Union[DType, Tuple[DType, float]] = DecayType.NO,learning_rate_decay_epochs: int = ,learning_rate_decay_batches: int = ,learning_rate_drop_div: float = ,learning_rate_drop_patience: Optional[int] = None,momentum: Union[None, float, Tuple[float, float]] = None,momentum_decay: Union[DType, Tuple[DType, float]] = DecayType.NO,momentum_decay_epochs: int = ,momentum_decay_batches: int = ,fit_batch_size: Union[None, int, str] = None,fit_learning_rate: Tuple[float, float] = (, ),fit_learning_rate_div: float = ,fit_beta: float = ,fit_min_batches: int = ,fit_max_batches: Optional[int] = None,*args, **kwargs) -> None:", "body": "if learning_rate_decay_epochs and learning_rate_decay_batches:raise ConfigError(\"\"\"\")if momentum_decay_epochs and momentum_decay_batches:raise ConfigError(\"\"\"\")start_val, end_val = learning_rate, Noneif isinstance(learning_rate, (tuple, list)):start_val, end_val = learning_ratedec_type, extra = learning_rate_decay, Noneif isinstance(learning_rate_decay, (tuple, list)):dec_type, extra = learning_rate_decayself._lr = start_valnum_it, self._lr_update_on_batch = learning_rate_decay_epochs, Falseif learning_rate_decay_batches > :num_it, self._lr_update_on_batch = learning_rate_decay_batches, Trueself._lr_schedule = DecayScheduler(start_val=start_val, end_val=end_val,num_it=num_it, dec_type=dec_type, extra=extra)self._lr_var = self._init_learning_rate_variable()start_val, end_val = momentum, Noneif isinstance(momentum, (tuple, list)):start_val, end_val = momentumdec_type, extra = momentum_decay, Noneif isinstance(momentum_decay, (tuple, list)):dec_type, extra = momentum_decayself._mom = start_valnum_it, self._mom_update_on_batch = momentum_decay_epochs, Falseself._mom_update_on_batch = momentum_decay_batches > num_it = momentum_decay_epochs if self._mom_update_on_batch else momentum_decay_batchesself._mom_schedule = DecayScheduler(start_val=start_val, end_val=end_val,num_it=num_it, dec_type=dec_type,extra=extra)self._mom_var = self._init_momentum_variable()self._learning_rate_drop_patience = learning_rate_drop_patienceself._learning_rate_drop_div = learning_rate_drop_divself._learning_rate_cur_impatience = self._learning_rate_last_impatience = self._learning_rate_cur_div = self._fit_batch_size = fit_batch_sizeself._fit_learning_rate = 
fit_learning_rateself._fit_learning_rate_div = fit_learning_rate_divself._fit_beta = fit_betaself._fit_min_batches = fit_min_batchesself._fit_max_batches = fit_max_batches", "docstring": "Initialize learning rate scheduler", "id": "f3155:c2:m3"} {"signature": "def get_momentum_variable(self):", "body": "return self._mom_var", "docstring": "Return current momentum variable\n\nReturns:\n momentum variable", "id": "f3155:c2:m7"} {"signature": "@staticmethoddef _get_best(values: List[float], losses: List[float],max_loss_div: float = , min_val_div: float = ) -> float:", "body": "assert len(values) == len(losses), \"\"min_ind = np.argmin(losses)for i in range(min_ind - , , -):if (losses[i] * max_loss_div > losses[min_ind]) or(values[i] * min_val_div < values[min_ind]):return values[i + ]return values[min_ind] / min_val_div", "docstring": "Find the best value according to given losses\n\nArgs:\n values: list of considered values\n losses: list of obtained loss values corresponding to `values`\n max_loss_div: maximal divergence of loss to be considered significant\n min_val_div: minimum divergence of loss to be considered significant\n\nReturns:\n best value divided by `min_val_div`", "id": "f3155:c2:m9"} {"signature": "def stacked_cnn(units: tf.Tensor,n_hidden_list: List,filter_width=,use_batch_norm=False,use_dilation=False,training_ph=None,add_l2_losses=False):", "body": "l2_reg = tf.nn.l2_loss if add_l2_losses else Nonefor n_layer, n_hidden in enumerate(n_hidden_list):if use_dilation:dilation_rate = ** n_layerelse:dilation_rate = units = tf.layers.conv1d(units,n_hidden,filter_width,padding='',dilation_rate=dilation_rate,kernel_initializer=INITIALIZER(),kernel_regularizer=l2_reg)if use_batch_norm:assert training_ph is not Noneunits = tf.layers.batch_normalization(units, training=training_ph)units = tf.nn.relu(units)return units", "docstring": "Number of convolutional layers stacked on top of each other\n\n Args:\n units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\n n_hidden_list: list with number of hidden units at the ouput of each layer\n filter_width: width of the kernel in tokens\n use_batch_norm: whether to use batch normalization between layers\n use_dilation: use power of 2 dilation scheme [1, 2, 4, 8 .. 
] for layers 1, 2, 3, 4 ...\n training_ph: boolean placeholder determining whether it is the training phase now or not.\n It is used only for batch normalization to determine whether to use\n current batch average (std) or memory stored average (std)\n add_l2_losses: whether to add l2 losses on network kernels to\n tf.GraphKeys.REGULARIZATION_LOSSES or not\n\n Returns:\n units: tensor at the output of the last convolutional layer", "id": "f3157:m0"} {"signature": "def stacked_bi_rnn(units: tf.Tensor,n_hidden_list: List,cell_type='',seq_lengths=None,use_peepholes=False,name=''):", "body": "for n, n_hidden in enumerate(n_hidden_list):with tf.variable_scope(name + '' + str(n)):if cell_type == '':forward_cell = tf.nn.rnn_cell.GRUCell(n_hidden)backward_cell = tf.nn.rnn_cell.GRUCell(n_hidden)elif cell_type == '':forward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes)backward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes)else:raise RuntimeError('')(rnn_output_fw, rnn_output_bw), (fw, bw) =tf.nn.bidirectional_dynamic_rnn(forward_cell,backward_cell,units,dtype=tf.float32,sequence_length=seq_lengths)units = tf.concat([rnn_output_fw, rnn_output_bw], axis=)if cell_type == '':last_units = tf.concat([fw, bw], axis=)else:(c_fw, h_fw), (c_bw, h_bw) = fw, bwc = tf.concat([c_fw, c_bw], axis=)h = tf.concat([h_fw, h_bw], axis=)last_units = (h, c)return units, last_units", "docstring": "Stacked recurrent neural networks, GRU or LSTM\n\n Args:\n units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\n n_hidden_list: list with number of hidden units at the output of each layer\n seq_lengths: length of sequences for different length sequences in batch\n can be None for maximum length as a length for every sample in the batch\n cell_type: 'lstm' or 'gru'\n use_peepholes: whether to use peephole connections (only 'lstm' case affected)\n name: what variable_scope to use for the network parameters\n Returns:\n units: tensor at the output of the last recurrent layer\n with dimensionality [None, n_tokens, n_hidden_list[-1]]\n last_units: tensor of last hidden states for GRU and tuple\n of last hidden states and last cell states for LSTM\n dimensionality of cell states and hidden states are\n similar and equal to [B x 2 * H], where B - batch\n size and H is number of hidden units", "id": "f3157:m3"} {"signature": "def multiplicative_self_attention(units, n_hidden=None, n_output_features=None, activation=None):", "body": "n_input_features = units.get_shape().as_list()[]if n_hidden is None:n_hidden = n_input_featuresif n_output_features is None:n_output_features = n_input_featuresqueries = tf.layers.dense(expand_tile(units, ), n_hidden, kernel_initializer=INITIALIZER())keys = tf.layers.dense(expand_tile(units, ), n_hidden, kernel_initializer=INITIALIZER())scores = tf.reduce_sum(queries * keys, axis=, keep_dims=True)attention = tf.nn.softmax(scores, dim=)attended_units = tf.reduce_sum(attention * expand_tile(units, ), axis=)output = tf.layers.dense(attended_units, n_output_features, activation, kernel_initializer=INITIALIZER())return output", "docstring": "Computes multiplicative self attention for time series of vectors (with batch dimension)\n the formula: score(h_i, h_j) = <W_1 h_i, W_2 h_j>, W_1 and W_2 are learnable matrices\n with dimensionality [n_hidden, n_input_features], where <a, b> stands for a and b\n dot product\n\n Args:\n units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]\n n_hidden: number of units in hidden representation of similarity 
measure\n n_output_features: number of features in output dense layer\n activation: activation at the output\n\n Returns:\n output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]", "id": "f3157:m10"} {"signature": "def bi_rnn(units: tf.Tensor,n_hidden: List,cell_type='',seq_lengths=None,trainable_initial_states=False,use_peepholes=False,name=''):", "body": "with tf.variable_scope(name + '' + cell_type.upper()):if cell_type == '':forward_cell = tf.nn.rnn_cell.GRUCell(n_hidden, kernel_initializer=INITIALIZER())backward_cell = tf.nn.rnn_cell.GRUCell(n_hidden, kernel_initializer=INITIALIZER())if trainable_initial_states:initial_state_fw = tf.tile(tf.get_variable('', [, n_hidden]), (tf.shape(units)[], ))initial_state_bw = tf.tile(tf.get_variable('', [, n_hidden]), (tf.shape(units)[], ))else:initial_state_fw = initial_state_bw = Noneelif cell_type == '':forward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes, initializer=INITIALIZER())backward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes, initializer=INITIALIZER())if trainable_initial_states:initial_state_fw = tf.nn.rnn_cell.LSTMStateTuple(tf.tile(tf.get_variable('', [, n_hidden]), (tf.shape(units)[], )),tf.tile(tf.get_variable('', [, n_hidden]), (tf.shape(units)[], )))initial_state_bw = tf.nn.rnn_cell.LSTMStateTuple(tf.tile(tf.get_variable('', [, n_hidden]), (tf.shape(units)[], )),tf.tile(tf.get_variable('', [, n_hidden]), (tf.shape(units)[], )))else:initial_state_fw = initial_state_bw = Noneelse:raise RuntimeError('')(rnn_output_fw, rnn_output_bw), (fw, bw) =tf.nn.bidirectional_dynamic_rnn(forward_cell,backward_cell,units,dtype=tf.float32,sequence_length=seq_lengths,initial_state_fw=initial_state_fw,initial_state_bw=initial_state_bw)kernels = [var for var in forward_cell.trainable_variables +backward_cell.trainable_variables if '' in var.name]for kernel in kernels:tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(kernel))return (rnn_output_fw, rnn_output_bw), (fw, bw)", "docstring": "Bi directional recurrent neural network. 
GRU or LSTM\n\n Args:\n units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\n n_hidden: list with number of hidden units at the ouput of each layer\n seq_lengths: length of sequences for different length sequences in batch\n can be None for maximum length as a length for every sample in the batch\n cell_type: 'lstm' or 'gru'\n trainable_initial_states: whether to create a special trainable variable\n to initialize the hidden states of the network or use just zeros\n use_peepholes: whether to use peephole connections (only 'lstm' case affected)\n name: what variable_scope to use for the network parameters\n\n Returns:\n units: tensor at the output of the last recurrent layer\n with dimensionality [None, n_tokens, n_hidden_list[-1]]\n last_units: tensor of last hidden states for GRU and tuple\n of last hidden stated and last cell states for LSTM\n dimensionality of cell states and hidden states are\n similar and equal to [B x 2 * H], where B - batch\n size and H is number of hidden units", "id": "f3157:m2"} {"signature": "def u_shape(units: tf.Tensor,n_hidden_list: List,filter_width=,use_batch_norm=False,training_ph=None):", "body": "units_for_skip_conn = []conv_net_params = {'': filter_width,'': use_batch_norm,'': training_ph}for n_hidden in n_hidden_list:units = stacked_cnn(units, [n_hidden], **conv_net_params)units_for_skip_conn.append(units)units = tf.layers.max_pooling1d(units, pool_size=, strides=, padding='')units = stacked_cnn(units, [n_hidden], **conv_net_params)for down_step, n_hidden in enumerate(n_hidden_list[::-]):units = tf.expand_dims(units, axis=)units = tf.layers.conv2d_transpose(units, n_hidden, filter_width, strides=(, ), padding='')units = tf.squeeze(units, axis=)skip_units = units_for_skip_conn[-(down_step + )]if skip_units.get_shape().as_list()[-] != n_hidden:skip_units = tf.layers.dense(skip_units, n_hidden)units = skip_units + unitsunits = stacked_cnn(units, [n_hidden], **conv_net_params)return units", "docstring": "Network architecture inspired by One Hundred layer Tiramisu.\n https://arxiv.org/abs/1611.09326. 
U-Net like.\n\n Args:\n units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]\n n_hidden_list: list with number of hidden units at the output of each layer\n filter_width: width of the kernel in tokens\n use_batch_norm: whether to use batch normalization between layers\n training_ph: boolean placeholder determining whether it is the training phase now or not.\n It is used only for batch normalization to determine whether to use\n current batch average (std) or memory stored average (std)\n Returns:\n units: tensor at the output of the last convolutional layer\n with dimensionality [None, n_tokens, n_hidden_list[-1]]", "id": "f3157:m4"} {"signature": "def variational_dropout(units, keep_prob, fixed_mask_dims=(,)):", "body": "units_shape = tf.shape(units)noise_shape = [units_shape[n] for n in range(len(units.shape))]for dim in fixed_mask_dims:noise_shape[dim] = return tf.nn.dropout(units, keep_prob, noise_shape)", "docstring": "Dropout with the same drop mask for all fixed_mask_dims\n\n Args:\n units: a tensor, usually with shapes [B x T x F], where\n B - batch size\n T - tokens dimension\n F - feature dimension\n keep_prob: keep probability\n fixed_mask_dims: in these dimensions the mask will be the same\n\n Returns:\n dropped units tensor", "id": "f3157:m20"} {"signature": "def expand_tile(units, axis):", "body": "assert axis in (, )n_time_steps = tf.shape(units)[]repetitions = [, , , ]repetitions[axis] = n_time_stepsreturn tf.tile(tf.expand_dims(units, axis), repetitions)", "docstring": "Expand and tile tensor along given axis\n Args:\n units: tf tensor with dimensions [batch_size, time_steps, n_input_features]\n axis: axis along which expand and tile. Must be 1 or 2", "id": "f3157:m8"} {"signature": "def attention_gen_block(hidden_for_sketch, hidden_for_attn_alignment, key, attention_depth):", "body": "with tf.name_scope(''):sketch_dims = tf.shape(hidden_for_sketch)batch_size = sketch_dims[]num_tokens = sketch_dims[]hidden_size = sketch_dims[]attn_alignment_dims = tf.shape(hidden_for_attn_alignment)attn_alignment_hidden_size = attn_alignment_dims[]sketches = [tf.zeros(shape=[batch_size, hidden_size], dtype=tf.float32)]aligned_hiddens = []cum_att = tf.zeros(shape=[batch_size, num_tokens]) for i in range(attention_depth):sketch, cum_att_, aligned_hidden = attention_gen_step(hidden_for_sketch, hidden_for_attn_alignment, sketches[-], key, cum_att)sketches.append(sketch) aligned_hiddens.append(aligned_hidden) cum_att += cum_att_final_aligned_hiddens = tf.reshape(tf.transpose(tf.stack(aligned_hiddens), [, , ]),[, attention_depth, attn_alignment_hidden_size])return final_aligned_hiddens", "docstring": "This is an implementation of the Luong et al. attention mechanism with general score and the constrained softmax (csoftmax).\n Based on the papers:\n https://arxiv.org/abs/1508.04025 \"Effective Approaches to Attention-based Neural Machine Translation\"\n https://andre-martins.github.io/docs/emnlp2017_final.pdf \"Learning What's Easy: Fully Differentiable Neural Easy-First Taggers\"\n Args:\n hidden_for_sketch: A tensorflow tensor used for sketch computation. This tensor has dimensionality [None, max_num_tokens, sketch_hidden_size]\n hidden_for_attn_alignment: A tensorflow tensor that is aligned for the output during the computation. 
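`variational_dropout` above draws one dropout mask per sample and reuses it along the fixed dimensions (typically the token axis). A numpy sketch of that effect for a [B, T, F] tensor, assuming the token dimension is the one held fixed:

```python
import numpy as np

def variational_dropout_np(units, keep_prob, seed=0):
    # one mask per sample and feature, broadcast over the token dimension T
    rng = np.random.default_rng(seed)
    b, t, f = units.shape
    mask = (rng.random((b, 1, f)) < keep_prob) / keep_prob
    return units * mask

x = np.ones((2, 4, 3))
y = variational_dropout_np(x, keep_prob=0.5)
assert np.allclose(y[:, 0, :], y[:, 3, :])  # every time step shares the same dropped features
```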
This tensor has dimensionality [None, max_num_tokens, hidden_size_for_attn_alignment]\n key: A tensorflow tensor with dimensionality [None, None, key_size]\n attention_depth: Number of csoftmax applications\n Returns:\n final_aligned_hiddens: Tensor at the output with dimensionality [1, attention_depth, hidden_size_for_attn_alignment]", "id": "f3158:m3"} {"signature": "def csoftmax(tensor, inv_cumulative_att):", "body": "shape_ten = tensor.shapeshape_cum = inv_cumulative_att.shapemerge_tensor = [tensor, inv_cumulative_att]cs, _ = tf.map_fn(csoftmax_for_slice, merge_tensor, dtype=[tf.float32, tf.float32]) return cs", "docstring": "This is an implementation of the constrained softmax (csoftmax).\n Based on the paper:\n https://andre-martins.github.io/docs/emnlp2017_final.pdf \"Learning What's Easy: Fully Differentiable Neural Easy-First Taggers\"\n Args:\n tensor: A tensorflow tensor of scores. This tensor has dimensionality [None, n_tokens]\n inv_cumulative_att: An inverse cumulative attention tensor with dimensionality [None, n_tokens]\n Returns:\n cs: Tensor at the output with dimensionality [None, n_tokens]", "id": "f3158:m1"} {"signature": "def bahdanau_attention(key, context, hidden_size, projected_align=False):", "body": "if hidden_size % != :raise ValueError(\"\")batch_size = tf.shape(context)[]max_num_tokens, token_size = context.get_shape().as_list()[-:]r_context = tf.reshape(context, shape=[-, max_num_tokens, token_size])projected_key = tf.layers.dense(key, hidden_size, kernel_initializer=xav())r_projected_key =tf.tile(tf.reshape(projected_key, shape=[-, , hidden_size]),[, max_num_tokens, ])lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size//)lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size//)(output_fw, output_bw), states =tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell,cell_bw=lstm_bw_cell,inputs=r_context,dtype=tf.float32)bilstm_output = tf.concat([output_fw, output_bw], -)concat_h_state = tf.concat([r_projected_key, output_fw, output_bw], -)projected_state =tf.layers.dense(concat_h_state, hidden_size, use_bias=False,kernel_initializer=xav())score =tf.layers.dense(tf.tanh(projected_state), units=, use_bias=False,kernel_initializer=xav())attn = tf.nn.softmax(score, dim=)if projected_align:log.info(\"\")t_context = tf.transpose(bilstm_output, [, , ])output = tf.reshape(tf.matmul(t_context, attn),shape=[batch_size, -, hidden_size])else:log.info(\"\")t_context = tf.transpose(r_context, [, , ])output = tf.reshape(tf.matmul(t_context, attn),shape=[batch_size, -, token_size])return output", "docstring": "This is an implementation of the Bahdanau et al. attention mechanism. 
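A numpy sketch of the additive (Bahdanau-style) scoring described in the record above: project the key and the context tokens, score each token through tanh and a learned vector, normalize with softmax, and return the weighted sum over tokens. The random matrices below are stand-ins for the trained dense layers, not the actual parameters:

```python
import numpy as np

rng = np.random.default_rng(1)
T, token_size, hidden = 5, 8, 6

context = rng.normal(size=(T, token_size))   # token representations
key = rng.normal(size=(hidden,))             # query/key vector

W_c = rng.normal(size=(token_size, hidden))  # stand-in for the token projection
W_k = rng.normal(size=(hidden, hidden))      # stand-in for the key projection
v = rng.normal(size=(hidden,))               # scoring vector

scores = np.tanh(context @ W_c + key @ W_k) @ v   # one score per token
weights = np.exp(scores) / np.exp(scores).sum()   # softmax over tokens
attended = weights @ context                      # weighted sum, shape (token_size,)
print(weights.round(3), attended.shape)
```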
Based on the paper:\n https://arxiv.org/abs/1409.0473 \"Neural Machine Translation by Jointly Learning to Align and Translate\"\n Args:\n key: A tensorflow tensor with dimensionality [None, None, key_size]\n context: A tensorflow tensor with dimensionality [None, None, max_num_tokens, token_size]\n hidden_size: Number of units in hidden representation\n projected_align: Using bidirectional lstm for hidden representation of context.\n If true, beetween input and attention mechanism insert layer of bidirectional lstm with dimensionality [hidden_size].\n If false, bidirectional lstm is not used.\n Returns:\n output: Tensor at the output with dimensionality [None, None, hidden_size]", "id": "f3159:m3"} {"signature": "def multiplicative_self_attention(units, n_hidden=None, n_output_features=None, activation=None):", "body": "n_input_features = K.int_shape(units)[]if n_hidden is None:n_hidden = n_input_featuresif n_output_features is None:n_output_features = n_input_featuresexp1 = Lambda(lambda x: expand_tile(x, axis=))(units)exp2 = Lambda(lambda x: expand_tile(x, axis=))(units)queries = Dense(n_hidden)(exp1)keys = Dense(n_hidden)(exp2)scores = Lambda(lambda x: K.sum(queries * x, axis=, keepdims=True))(keys)attention = Lambda(lambda x: softmax(x, axis=))(scores)mult = Multiply()([attention, exp1])attended_units = Lambda(lambda x: K.sum(x, axis=))(mult)output = Dense(n_output_features, activation=activation)(attended_units)return output", "docstring": "Compute multiplicative self attention for time series of vectors (with batch dimension)\nthe formula: score(h_i, h_j) = , W_1 and W_2 are learnable matrices\nwith dimensionality [n_hidden, n_input_features]\n\nArgs:\n units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]\n n_hidden: number of units in hidden representation of similarity measure\n n_output_features: number of features in output dense layer\n activation: activation at the output\n\nReturns:\n output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]", "id": "f3160:m2"} {"signature": "def additive_self_attention(units, n_hidden=None, n_output_features=None, activation=None):", "body": "n_input_features = K.int_shape(units)[]if n_hidden is None:n_hidden = n_input_featuresif n_output_features is None:n_output_features = n_input_featuresexp1 = Lambda(lambda x: expand_tile(x, axis=))(units)exp2 = Lambda(lambda x: expand_tile(x, axis=))(units)units_pairs = Concatenate(axis=)([exp1, exp2])query = Dense(n_hidden, activation=\"\")(units_pairs)attention = Dense(, activation=lambda x: softmax(x, axis=))(query)attended_units = Lambda(lambda x: K.sum(attention * x, axis=))(exp1)output = Dense(n_output_features, activation=activation)(attended_units)return output", "docstring": "Compute additive self attention for time series of vectors (with batch dimension)\n the formula: score(h_i, h_j) = \n v is a learnable vector of n_hidden dimensionality,\n W_1 and W_2 are learnable [n_hidden, n_input_features] matrices\n\nArgs:\n units: tf tensor with dimensionality [batch_size, time_steps, n_input_features]\n n_hidden: number of2784131 units in hidden representation of similarity measure\n n_output_features: number of features in output dense layer\n activation: activation at the output\n\nReturns:\n output: self attended tensor with dimensionality [batch_size, time_steps, n_output_features]", "id": "f3160:m1"} {"signature": "def interact_model(config: Union[str, Path, dict]) -> None:", "body": "model = build_model(config)while True:args = []for 
in_x in model.in_x:args.append((input(''.format(in_x)),))if args[-][] in {'', '', '', ''}:returnpred = model(*args)if len(model.out_params) > :pred = zip(*pred)print('', *pred)", "docstring": "Start interaction with the model described in corresponding configuration file.", "id": "f3162:m1"} {"signature": "def initialize_params_in_config(self, basic_config: dict, paths: List[list]) -> dict:", "body": "config = deepcopy(basic_config)for path_ in paths:param_name = path_[-]value = self.get_value_from_config(basic_config, path_)if isinstance(value, dict):if (value.get(self.prefix + \"\") orvalue.get(self.prefix + \"\") orvalue.get(self.prefix + \"\")):self.insert_value_or_dict_into_config(config, path_,self.sample_params(**{param_name: deepcopy(value)})[param_name])return config", "docstring": "Randomly initialize all the changable parameters in config\n\nArgs:\n basic_config: config where changable parameters are dictionaries with keys\n ``prefix`_range`, ``prefix`_bool`, ``prefix`_choice`\n paths: list of paths to changable parameters\n\nReturns:\n config", "id": "f3168:c0:m5"} {"signature": "def sample_params(self, **params) -> dict:", "body": "if not params:return {}else:params_copy = deepcopy(params)params_sample = dict()for param, param_val in params_copy.items():if isinstance(param_val, dict):if self.prefix + '' in param_val and param_val[self.prefix + '']:sample = bool(random.choice([True, False]))elif self.prefix + '' in param_val:sample = self._sample_from_ranges(param_val)elif self.prefix + '' in param_val:sample = random.choice(param_val[self.prefix + ''])else:sample = param_valparams_sample[param] = sampleelse:params_sample[param] = params_copy[param]return params_sample", "docstring": "Sample parameters according to the given possible values\n\nArgs:\n **params: dictionary like {\"param_0\": {\"`prefix`_range\": [0, 10]},\n \"param_1\": {\"`prefix`_range\": [0, 10], \"discrete\": true},\n \"param_2\": {\"`prefix`_range\": [0, 1], \"scale\": \"log\"},\n \"param_3\": {\"`prefix`_bool\": true},\n \"param_4\": {\"`prefix`_choice\": [0, 1, 2, 3]}}\n\nReturns:\n dictionary with randomly sampled parameters", "id": "f3168:c0:m6"} {"signature": "def get_metric_by_name(name: str) -> Callable[..., Any]:", "body": "if name not in _REGISTRY:raise ConfigError(f'')return fn_from_str(_REGISTRY[name])", "docstring": "Returns a metric callable with a corresponding name.", "id": "f3170:m2"} {"signature": "def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[Any, Any]]]:", "body": "raise NotImplementedError", "docstring": "Reads a file from a path and returns data as a list of tuples of inputs and correct outputs\n for every data type in ``train``, ``valid`` and ``test``.", "id": "f3176:c0:m0"} {"signature": "def path_set_md5(url):", "body": "scheme, netloc, path, query_string, fragment = urlsplit(url)path += ''return urlunsplit((scheme, netloc, path, query_string, fragment))", "docstring": "Given a file URL, return a md5 query of the file\n\n Args:\n url: a given URL\n Returns:\n URL of the md5 file", "id": "f3180:m24"} {"signature": "def set_query_parameter(url, param_name, param_value):", "body": "scheme, netloc, path, query_string, fragment = urlsplit(url)query_params = parse_qs(query_string)query_params[param_name] = [param_value]new_query_string = urlencode(query_params, doseq=True)return urlunsplit((scheme, netloc, path, new_query_string, fragment))", "docstring": "Given a URL, set or replace a query parameter and return the modified URL.\n\n Args:\n url: a given URL\n 
param_name: the parameter name to add\n param_value: the parameter value\n Returns:\n URL with the added parameter", "id": "f3180:m25"} {"signature": "def _init_agent(self) -> DefaultAgent:", "body": "agent = self.agent_generator()return agent", "docstring": "Initiates Alexa skill agent from agent generator", "id": "f3193:c0:m3"} {"signature": "def verify_cert(signature_chain_url: str) -> Optional[crypto.X509]:", "body": "try:certs_chain_get = requests.get(signature_chain_url)except requests.exceptions.ConnectionError as e:log.error(f'')return Nonecerts_chain_txt = certs_chain_get.textcerts_chain = extract_certs(certs_chain_txt)amazon_cert: crypto.X509 = certs_chain.pop()sc_url_verification = verify_sc_url(signature_chain_url)if not sc_url_verification:log.error(f'')expired_verification = not amazon_cert.has_expired()if not expired_verification:log.error(f'')sans_verification = verify_sans(amazon_cert)if not sans_verification:log.error(f'')chain_verification = verify_certs_chain(certs_chain, amazon_cert)if not chain_verification:log.error(f'')result = (sc_url_verification and expired_verification and sans_verification and chain_verification)return amazon_cert if result else None", "docstring": "Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.\n\n Args:\n signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.\n Returns:\n result: Amazon certificate if verification was successful, None if not.", "id": "f3194:m5"} {"signature": "def verify_sans(amazon_cert: crypto.X509) -> bool:", "body": "cert_extentions = [amazon_cert.get_extension(i) for i in range(amazon_cert.get_extension_count())]subject_alt_names = ''for extention in cert_extentions:if '' in str(extention.get_short_name()):subject_alt_names = extention.__str__()breakresult = '' in subject_alt_namesreturn result", "docstring": "Verifies Subject Alternative Names (SANs) for Amazon certificate.\n\n Args:\n amazon_cert: Pycrypto X509 Amazon certificate.\n\n Returns:\n result: True if verification was successful, False if not.", "id": "f3194:m2"} {"signature": "def handle_request(self, request: dict) -> dict:", "body": "request_type = request['']['']request_id = request['']['']log.debug(f'')if request_type in self.handled_requests.keys():response: dict = self.handled_requests[request_type](request)else:response: dict = self.handled_requests[''](request)log.warning(f'')self._rearm_self_destruct()return response", "docstring": "Routes Alexa requests to appropriate handlers.\n\n Args:\n request: Alexa request.\n Returns:\n response: Response conforming Alexa response specification.", "id": "f3195:c0:m3"} {"signature": "def _start_timer(self) -> None:", "body": "self.timer = Timer(self.config[''], self.self_destruct_callback)self.timer.start()", "docstring": "Initiates self-destruct timer.", "id": "f3195:c0:m1"} {"signature": "@overridesdef gen_batches(self, batch_size: int, shuffle: bool = None)-> Generator[Tuple[List[str], List[int]], Any, None]:", "body": "if shuffle is None:shuffle = self.shuffleif shuffle:_doc_ids = self.random.sample(self.doc_ids, len(self.doc_ids))else:_doc_ids = self.doc_idsif batch_size > :batches = [_doc_ids[i:i + batch_size] for i inrange(, len(_doc_ids), batch_size)]else:batches = [_doc_ids]for i, doc_ids in enumerate(batches):docs = [self.get_doc_content(doc_id) for doc_id in doc_ids]doc_nums = [self.doc2index[doc_id] for doc_id in doc_ids]yield docs, zip(doc_ids, doc_nums)", "docstring": "Gen batches of documents.\n\n Args:\n batch_size: a 
number of samples in a single batch\n shuffle: whether to shuffle data during batching\n\n Yields:\n generated tuple of documents and their ids", "id": "f3203:c0:m5"} {"signature": "def get_db_name(self) -> str:", "body": "cursor = self.connect.cursor()cursor.execute(\"\")assert cursor.arraysize == name = cursor.fetchone()[]cursor.close()return name", "docstring": "Get DB name.\n\n Returns:\n DB name", "id": "f3203:c0:m2"} {"signature": "def map_doc2idx(self) -> Dict[int, Any]:", "body": "doc2idx = {doc_id: i for i, doc_id in enumerate(self.doc_ids)}logger.info(\"\".format(len(doc2idx)))return doc2idx", "docstring": "Map DB ids to integer ids.\n\n Returns:\n a dictionary of document titles and correspondent integer indices", "id": "f3203:c0:m3"} {"signature": "@overridesdef get_doc_ids(self) -> List[Any]:", "body": "cursor = self.connect.cursor()cursor.execute(''.format(self.db_name))ids = [ids[] for ids in cursor.fetchall()]cursor.close()return ids", "docstring": "Get document ids.\n\n Returns:\n document ids", "id": "f3203:c0:m1"} {"signature": "def get_instances(self):", "body": "doc_ids = list(self.doc_ids)docs = [self.get_doc_content(doc_id) for doc_id in doc_ids]doc_nums = [self.doc2index[doc_id] for doc_id in doc_ids]return docs, zip(doc_ids, doc_nums)", "docstring": "Get all data", "id": "f3203:c0:m6"} {"signature": "@overridesdef get_doc_content(self, doc_id: Any) -> Optional[str]:", "body": "cursor = self.connect.cursor()cursor.execute(\"\".format(self.db_name),(doc_id,))result = cursor.fetchone()cursor.close()return result if result is None else result[]", "docstring": "Get document content by id.\n\n Args:\n doc_id: a document id\n\n Returns:\n document content if success, else raise Exception", "id": "f3203:c0:m4"} {"signature": "@staticmethoddef _extract_cqas(data: Dict[str, Any]) -> List[Tuple[Tuple[str, str], Tuple[List[str], List[int]]]]:", "body": "cqas = []if data:for article in data['']:for par in article['']:context = par['']for qa in par['']:q = qa['']ans_text = []ans_start = []for answer in qa['']:ans_text.append(answer[''])ans_start.append(answer[''])cqas.append(((context, q), (ans_text, ans_start)))return cqas", "docstring": "Extracts context, question, answer, answer_start from SQuAD data\n\n Args:\n data: data in squad format\n\n Returns:\n list of (context, question), (answer_text, answer_start)\n answer text and answer_start are lists", "id": "f3205:c0:m1"} {"signature": "def gen_batches(self, batch_size: int, data_type: str = '',shuffle: bool = None, return_indexes: bool = False) -> Iterator[tuple]:", "body": "if shuffle is None:shuffle = self.shuffledata = self.data[data_type]if shuffle:random.shuffle(data)lengths = [len(x[]) for x in data]indexes = np.argsort(lengths)L = len(data)if batch_size < :batch_size = Lfor start in range(, L, batch_size):indexes_to_yield = indexes[start:start+batch_size]data_to_yield = tuple(list(x) for x in zip(*([data[i] for i in indexes_to_yield])))if return_indexes:yield indexes_to_yield, data_to_yieldelse:yield data_to_yield", "docstring": "Generate batches of inputs and expected output to train neural networks\n\n Args:\n batch_size: number of samples in batch\n data_type: can be either 'train', 'test', or 'valid'\n shuffle: whether to shuffle dataset before batching\n return_indexes: whether to return indexes of batch elements in initial dataset\n\n Yields:\n a tuple of a batch of inputs and a batch of expected outputs.\n If `return_indexes` is True, also yields indexes of batch elements.", "id": "f3206:c0:m2"} {"signature": 
"def main():", "body": "args = parser.parse_args()path = get_settings_path()if args.default:if populate_settings_dir(force=True):print(f'')else:print(f'')else:print(f'')", "docstring": "DeepPavlov console configuration utility.", "id": "f3214:m0"} {"signature": "def read_requirements():", "body": "reqs_path = os.path.join(__location__, '')with open(reqs_path, encoding='') as f:reqs = [line.strip() for line in f if not line.strip().startswith('')]names = []links = []for req in reqs:if '' in req:links.append(req)else:names.append(req)return {'': names, '': links}", "docstring": "parses requirements from requirements.txt", "id": "f3217:m0"} {"signature": "def is_effective_member(self, group_id, netid):", "body": "self._valid_group_id(group_id)netid = re.sub('', '', netid)url = \"\".format(self.API,group_id,netid)try:data = self._get_resource(url)return True except DataFailureException as ex:if ex.status == :return Falseelse:raise", "docstring": "Returns True if the netid is in the group, False otherwise.", "id": "f3223:c0:m10"} {"signature": "def get_group_by_id(self, group_id):", "body": "self._valid_group_id(group_id)url = \"\".format(self.API, group_id)data = self._get_resource(url)return self._group_from_json(data.get(\"\"))", "docstring": "Returns a restclients.Group object for the group identified by the\npassed group ID.", "id": "f3223:c0:m2"} {"signature": "def emit(self, record):", "body": "msg = self.format(record)if not isinstance(msg, dict):msg = json.loads(msg)self.collection.insert(msg)", "docstring": "pymongo expects a dict", "id": "f3230:c0:m1"} {"signature": "def avail_archs(self):", "body": "return {ARM32: (KS_ARCH_ARM, KS_MODE_ARM),ARM64: (KS_ARCH_ARM64, KS_MODE_LITTLE_ENDIAN),ARM_TB: (KS_ARCH_ARM, KS_MODE_THUMB),HEXAGON: (KS_ARCH_HEXAGON, KS_MODE_BIG_ENDIAN),MIPS32: (KS_ARCH_MIPS, KS_MODE_MIPS32),MIPS64: (KS_ARCH_MIPS, KS_MODE_MIPS64),PPC32: (KS_ARCH_PPC, KS_MODE_PPC32),PPC64: (KS_ARCH_PPC, KS_MODE_PPC64),SPARC32: (KS_ARCH_SPARC, KS_MODE_SPARC32),SPARC64: (KS_ARCH_SPARC, KS_MODE_SPARC64),SYSTEMZ: (KS_ARCH_SYSTEMZ, KS_MODE_BIG_ENDIAN),X86_16: (KS_ARCH_X86, KS_MODE_16),X86_32: (KS_ARCH_X86, KS_MODE_32),X86_64: (KS_ARCH_X86, KS_MODE_64),}", "docstring": "Initialize the dictionary of architectures for assembling via keystone", "id": "f3245:c0:m2"} {"signature": "def _ensure_log_handler(self):", "body": "if log.handlers:returnhandler = logging.StreamHandler()formatter = logging.Formatter('')handler.setFormatter(formatter)log.addHandler(handler)", "docstring": "If there's no log configuration, set up a default handler.", "id": "f3255:c0:m1"} {"signature": "def _add_request_data(data, request):", "body": "try:request_data = _build_request_data(request)except Exception as e:log.exception(\"\", e)else:if request_data:_filter_ip(request_data, SETTINGS[''])data[''] = request_data", "docstring": "Attempts to build request data; if successful, sets the 'request' key on `data`.", "id": "f3284:m30"} {"signature": "def _build_payload(data):", "body": "for k, v in iteritems(data):data[k] = _transform(v, key=(k,))payload = {'': SETTINGS[''],'': data}return payload", "docstring": "Returns the full payload as a string.", "id": "f3284:m46"} {"signature": "def init(access_token, environment='', scrub_fields=None, url_fields=None, **kw):", "body": "global SETTINGS, agent_log, _initialized, _transforms, _serialize_transform, _threadsif scrub_fields is not None:SETTINGS[''] = list(scrub_fields)if url_fields is not None:SETTINGS[''] = list(url_fields)SETTINGS = dict_merge(SETTINGS, kw)if 
_initialized:if not SETTINGS.get(''):log.warning('')returnSETTINGS[''] = access_tokenSETTINGS[''] = environmentif SETTINGS.get(''):logging.basicConfig()if SETTINGS.get('') == '':agent_log = _create_agent_log()_serialize_transform = SerializableTransform(safe_repr=SETTINGS[''][''],whitelist_types=SETTINGS[''][''])_transforms = [ScrubRedactTransform(),_serialize_transform,ScrubTransform(suffixes=[(field,) for field in SETTINGS['']], redact_char=''),ScrubUrlTransform(suffixes=[(field,) for field in SETTINGS['']], params_to_scrub=SETTINGS[''])]shortener_keys = [('', ''),('', ''),('', '', ''),('', '', ''),]if SETTINGS['']['']:shortener_keys.append(('', '', '', '', ''))shortener_keys.append(('', '', '', '', '', ''))shortener_keys.append(('', '', '', '', '', ''))shortener_keys.append(('', '', '', '', '', ''))shortener_keys.extend(SETTINGS[''])shortener = ShortenerTransform(safe_repr=SETTINGS[''][''],keys=shortener_keys,**SETTINGS[''][''])_transforms.append(shortener)_threads = queue.Queue()events.reset()filters.add_builtin_filters(SETTINGS)_initialized = True", "docstring": "Saves configuration variables in this module's SETTINGS.\n\naccess_token: project access token. Get this from the Rollbar UI:\n - click \"Settings\" in the top nav\n - click \"Projects\" in the left nav\n - copy-paste the appropriate token.\nenvironment: environment name. Can be any string; suggestions: 'production', 'development',\n 'staging', 'yourname'\n**kw: provided keyword arguments will override keys in SETTINGS.", "id": "f3284:m6"} {"signature": "def _report_message(message, level, request, extra_data, payload_data):", "body": "if not _check_config():returnfiltered_message = events.on_message(message,request=request,extra_data=extra_data,payload_data=payload_data,level=level)if filtered_message is False:returndata = _build_base_data(request, level=level)data[''] = {'': {'': filtered_message}}if extra_data:extra_data = extra_datadata[''][''].update(extra_data)request = _get_actual_request(request)_add_request_data(data, request)_add_person_data(data, request)_add_lambda_context_data(data)data[''] = _build_server_data()if payload_data:data = dict_merge(data, payload_data)payload = _build_payload(data)send_payload(payload, payload.get(''))return data['']", "docstring": "Called by report_message() wrapper", "id": "f3284:m20"} {"signature": "def _create_agent_log():", "body": "log_file = SETTINGS['']if not log_file.endswith(''):log.error(\"\"\"\")log_file = DEFAULTS['']retval = logging.getLogger('')handler = logging.FileHandler(log_file, '', '')formatter = logging.Formatter('')handler.setFormatter(formatter)retval.addHandler(handler)retval.setLevel(logging.WARNING)return retval", "docstring": "Creates .rollbar log file for use with rollbar-agent", "id": "f3284:m16"} {"signature": "def search_items(title, return_fields=None, access_token=None, endpoint=None, **search_fields):", "body": "if not title:return []if return_fields is not None:return_fields = ''.join(return_fields)return _get_api('',title=title,fields=return_fields,access_token=access_token,endpoint=endpoint,**search_fields)", "docstring": "Searches a project for items that match the input criteria.\n\ntitle: all or part of the item's title to search for.\nreturn_fields: the fields that should be returned for each item.\n e.g. ['id', 'project_id', 'status'] will return a dict containing\n only those fields for each item.\naccess_token: a project access token. 
If this is not provided,\n the one provided to init() will be used instead.\nsearch_fields: additional fields to include in the search.\n currently supported: status, level, environment", "id": "f3284:m11"} {"signature": "def setLevel(self, level):", "body": "self.notify_level = _checkLevel(level)", "docstring": "Override so we set the effective level for which\nlog records we notify Rollbar about instead of which\nrecords we save to the history.", "id": "f3297:c0:m1"} {"signature": "def scale_image(self, in_fname, out_fname, max_width, max_height):", "body": "try:from PIL import Imageexcept ImportError:import Imageimg = Image.open(in_fname)width_in, height_in = img.sizescale_w = max_width / float(width_in)scale_h = max_height / float(height_in)if height_in * scale_w <= max_height:scale = scale_welse:scale = scale_hif scale >= and in_fname == out_fname:returnwidth_sc = int(round(scale * width_in))height_sc = int(round(scale * height_in))img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)thumb = Image.new('', (max_width, max_height), (, , ))pos_insert = ((max_width - width_sc) // , (max_height - height_sc) // )thumb.paste(img, pos_insert)thumb.save(out_fname)", "docstring": "Scales an image with the same aspect ratio centered in an\n image with a given max_width and max_height\n if in_fname == out_fname the image can only be scaled down", "id": "f3301:c0:m16"} {"signature": "@propertydef code_div(self):", "body": "code_example = self.code_exampleif code_example is None:return Nonereturn self.CODE_TEMPLATE.format(snippet=self.get_description()[], code=code_example,ref_name=self.reference)", "docstring": "The string for creating a code example for the gallery", "id": "f3301:c0:m1"} {"signature": "def copy_thumbnail_figure(self):", "body": "ret = Noneif self._thumbnail_figure is not None:if not isstring(self._thumbnail_figure):ret = self._thumbnail_figureelse:ret = osp.join(osp.dirname(self.outfile),osp.basename(self._thumbnail_figure))copyfile(self._thumbnail_figure, ret)return retelif hasattr(self.nb.metadata, ''):if not isstring(self.nb.metadata.thumbnail_figure):ret = self.nb.metadata.thumbnail_figureelse:ret = osp.join(osp.dirname(self.outfile), '',osp.basename(self.nb.metadata.thumbnail_figure))copyfile(osp.join(osp.dirname(self.infile),self.nb.metadata.thumbnail_figure),ret)return ret", "docstring": "The integer of the thumbnail figure", "id": "f3301:c0:m19"} {"signature": "def __init__(self, infile, outfile, disable_warnings=True,preprocess=True, clear=True, code_example=None,supplementary_files=None, other_supplementary_files=None,thumbnail_figure=None, url=None, insert_bokeh=False,insert_bokeh_widgets=False, tag_options={}):", "body": "self.infile = infileself.outfile = outfileself.preprocess = preprocessself.clear = clearself._code_example = code_exampleself._supplementary_files = supplementary_filesself._other_supplementary_files = other_supplementary_filesself._thumbnail_figure = thumbnail_figureself._url = urlself.insert_bokeh = insert_bokehself.insert_bokeh_widgets = insert_bokeh_widgetsself.tag_options = tag_optionsself.process_notebook(disable_warnings)self.create_thumb()", "docstring": "Parameters\n----------\ninfile: str\n path to the existing notebook\noutfile: str\n path to the new notebook\ndisable_warnings: bool\n Boolean to control whether warnings shall be included in the rst\n file or not\npreprocess: bool\n If True, the notebook is processed when generating the rst file\nclear: bool\n If True, the output in the download notebook is cleared\ncode_example: str\n A 
python code sample that shall be used instead of a thumbnail\n figure in the gallery. Note that you can also include a\n ``'code_example'`` key in the metadata of the notebook\nsupplementary_files: list of str\n Supplementary data files that shall be copied to the output\n directory and inserted in the rst file for download\nother_supplementary_files: list of str\n Other supplementary data files that shall be copied but not\n inserted for download\nthumbnail_figure: int\n The number of the figure that shall be used for download or a path\n to a file\nurl: str\n The url where to download the notebook\ninsert_bokeh: False or str\n The version string for bokeh to use for the style sheet\ninsert_bokeh_widgets: bool or str\n The version string for bokeh to use for the widgets style sheet\ntag_options: dict\n A dictionary with traitlets for the\n :class:`nbconvert.preprocessors.TagRemovePreprocessor`", "id": "f3301:c0:m8"} {"signature": "def create_rst(self, nb, in_dir, odir):", "body": "raw_rst, resources = nbconvert.export_by_name('', nb)rst_content = ''i0 = m = Nonebokeh_str = ''if '' in raw_rst and self.insert_bokeh:bokeh_str += self.BOKEH_TEMPLATE.format(version=self.insert_bokeh)if '' in raw_rst and self.insert_bokeh_widgets:bokeh_str += self.BOKEH_WIDGETS_TEMPLATE.format(version=self.insert_bokeh_widgets)for m in code_blocks.finditer(raw_rst):lines = m.group().splitlines(True)header, content = lines[], ''.join(lines[:])no_magics = magic_patt.sub('', content)if no_magics.strip():rst_content += (raw_rst[i0:m.start()] + bokeh_str + header + no_magics)bokeh_str = ''i0 = m.end()else:rst_content += raw_rst[i0:m.start()]i0 = m.end()if m is not None:rst_content += bokeh_str + raw_rst[m.end():]else:rst_content = raw_rstrst_content = '' % self.reference +rst_contenturl = self.urlif url is not None:rst_content += self.CODE_DOWNLOAD_NBVIEWER.format(pyfile=os.path.basename(self.py_file),nbfile=os.path.basename(self.outfile),url=url)else:rst_content += self.CODE_DOWNLOAD.format(pyfile=os.path.basename(self.py_file),nbfile=os.path.basename(self.outfile))supplementary_files = self.supplementary_filesother_supplementary_files = self.other_supplementary_filesif supplementary_files or other_supplementary_files:for f in (supplementary_files or []) + (other_supplementary_files or []):if not os.path.exists(os.path.join(odir, f)):copyfile(os.path.join(in_dir, f), os.path.join(odir, f))if supplementary_files:rst_content += self.data_download(supplementary_files)rst_file = self.get_out_file()outputs = sorted(resources[''], key=rst_content.find)base = os.path.join('', os.path.splitext(os.path.basename(self.infile))[] + '')out_map = {os.path.basename(original): base % ifor i, original in enumerate(outputs)}for original, final in six.iteritems(out_map):rst_content = rst_content.replace(original, final)with open(rst_file, '')as f:f.write(rst_content.rstrip() + '')pictures = []for original in outputs:fname = os.path.join(odir, out_map[os.path.basename(original)])pictures.append(fname)if six.PY3:f = open(fname, '')else:f = open(fname, '')f.write(resources[''][original])f.close()self.pictures = pictures", "docstring": "Create the rst file from the notebook node", "id": "f3301:c0:m11"} {"signature": "def nbviewer_link(url):", "body": "if six.PY2:from urlparse import urlparse as urlsplitelse:from urllib.parse import urlsplitinfo = urlsplit(url)domain = info.netlocurl_type = '' if domain == '' else ''return '' % (url_type, info.path)", "docstring": "Return the link to the Jupyter nbviewer for the given notebook url", "id": 
"f3301:m2"} {"signature": "@propertydef code_example(self):", "body": "if self._code_example is not None:return self._code_examplereturn getattr(self.nb.metadata, '', None)", "docstring": "The code example out of the notebook metadata", "id": "f3301:c0:m2"} {"signature": "@propertydef url(self):", "body": "if self._url is not None:url = self._urlelse:url = getattr(self.nb.metadata, '', None)if url is not None:return nbviewer_link(url)", "docstring": "The url on jupyter nbviewer for this notebook or None if unknown", "id": "f3301:c0:m6"} {"signature": "def get_description(self):", "body": "def split_header(s, get_header=True):s = s.lstrip().rstrip()parts = s.splitlines()if parts[].startswith(''):if get_header:header = re.sub('', '', parts.pop())if not parts:return header, ''else:header = ''rest = ''.join(parts).lstrip().split('')desc = rest[].replace('', '')return header, descelse:if get_header:if parts[].startswith(('', '')):parts = parts[:]header = parts.pop()if parts and parts[].startswith(('', '')):parts.pop()if not parts:return header, ''else:header = ''rest = ''.join(parts).lstrip().split('')desc = rest[].replace('', '')return header, descfirst_cell = self.nb[''][]if not first_cell[''] == '':return '', ''header, desc = split_header(first_cell[''])if not desc and len(self.nb['']) > :second_cell = self.nb[''][]if second_cell[''] == '':_, desc = split_header(second_cell[''], False)return header, desc", "docstring": "Get summary and description of this notebook", "id": "f3301:c0:m15"} {"signature": "def run_fastqc(job, r1_id, r2_id):", "body": "work_dir = job.fileStore.getLocalTempDir()job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, ''))parameters = ['']output_names = ['', '']if r2_id:job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, ''))parameters.extend(['', '', ''])output_names.extend(['', ''])dockerCall(job=job, tool='',workDir=work_dir, parameters=parameters)output_files = [os.path.join(work_dir, x) for x in output_names]tarball_files(tar_name='', file_paths=output_files, output_dir=work_dir)return job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))", "docstring": "Run Fastqc on the input reads\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str r1_id: FileStoreID of fastq read 1\n:param str r2_id: FileStoreID of fastq read 2\n:return: FileStoreID of fastQC output (tarball)\n:rtype: str", "id": "f3305:m0"} {"signature": "def run_gatk_preprocessing(job, bam, bai, ref, ref_dict, fai, g1k, mills, dbsnp, realign=False, unsafe=False):", "body": "mdups_disk = PromisedRequirement(lambda bam_, bai_: * (bam_.size + bai_.size), bam, bai)mdups = job.wrapJobFn(picard_mark_duplicates,bam,bai,cores=job.cores,disk=mdups_disk,memory=job.memory)bqsr_input_bam = mdups.rv()bqsr_input_bai = mdups.rv()genome_ref_size = ref.size + ref_dict.size + fai.sizeif realign:indel_ref_size = mills.size + g1k.size + genome_ref_sizerealigner_target_disk = PromisedRequirement(lambda bam_, bai_, ref_size:bam_.size + bai_.size + * ref_size,mdups.rv(),mdups.rv(),indel_ref_size)realigner_target = job.wrapJobFn(run_realigner_target_creator,mdups.rv(),mdups.rv(),ref, ref_dict, fai,g1k, mills,unsafe=unsafe,cores=, disk=realigner_target_disk,memory=job.memory)indel_realign_disk = PromisedRequirement(lambda bam_, bai_, intervals, ref_size: * (bam_.size + bai_.size) + intervals.size + ref_size,mdups.rv(),mdups.rv(),realigner_target.rv(),indel_ref_size)indel_realign = job.wrapJobFn(run_indel_realignment,realigner_target.rv(),mdups.rv(),mdups.rv(),ref, ref_dict, fai,g1k, 
mills,unsafe=unsafe,cores=, disk=indel_realign_disk,memory=job.memory)mdups.addChild(realigner_target)realigner_target.addChild(indel_realign)bqsr_input_bam = indel_realign.rv()bqsr_input_bai = indel_realign.rv()bqsr_ref_size = dbsnp.size + mills.size + genome_ref_sizebase_recal_disk = PromisedRequirement(lambda bam_, bai_, ref_size:bam_.size + bai_.size + * ref_size,bqsr_input_bam,bqsr_input_bai,bqsr_ref_size)base_recal = job.wrapJobFn(run_base_recalibration,bqsr_input_bam,bqsr_input_bai,ref, ref_dict, fai,dbsnp, mills,unsafe=unsafe,cores=job.cores,disk=base_recal_disk,memory=job.memory)recalibrate_reads_disk = PromisedRequirement(lambda bam_, bai_, recal, ref_size: * (bam_.size + bai_.size) + recal.size + ref_size,bqsr_input_bam,bqsr_input_bai,base_recal.rv(),genome_ref_size)recalibrate_reads = job.wrapJobFn(apply_bqsr_recalibration,base_recal.rv(),bqsr_input_bam,bqsr_input_bai,ref, ref_dict, fai,unsafe=unsafe,cores=job.cores,disk=recalibrate_reads_disk,memory=job.memory)job.addChild(mdups)mdups.addFollowOn(base_recal)base_recal.addChild(recalibrate_reads)return recalibrate_reads.rv(), recalibrate_reads.rv()", "docstring": "GATK Preprocessing Pipeline\n0: Mark duplicates\n1: Create INDEL realignment intervals\n2: Realign INDELs\n3: Recalibrate base quality scores\n4: Apply base score recalibration\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str bam: FileStoreID for BAM file\n:param str bai: FileStoreID for BAM index file\n:param str ref: FileStoreID for reference genome fasta file\n:param str ref_dict: FileStoreID for reference sequence dictionary file\n:param str fai: FileStoreID for reference fasta index file\n:param str g1k: FileStoreID for 1000 Genomes VCF file\n:param str mills: FileStoreID for Mills VCF file\n:param str dbsnp: FileStoreID for dbSNP VCF file\n:param bool realign: If True, then runs GATK INDEL realignment\"\n:param bool unsafe: If True, runs GATK tools in UNSAFE mode: \"-U ALLOW_SEQ_DICT_INCOMPATIBILITY\"\n:return: FileStoreIDs for BAM and BAI files\n:rtype: tuple(str, str)", "id": "f3306:m14"} {"signature": "def run_cutadapt(job, r1_id, r2_id, fwd_3pr_adapter, rev_3pr_adapter):", "body": "work_dir = job.fileStore.getLocalTempDir()if r2_id:require(rev_3pr_adapter, \"\")parameters = ['', fwd_3pr_adapter,'', '']if r1_id and r2_id:job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, ''))job.fileStore.readGlobalFile(r2_id, os.path.join(work_dir, ''))parameters.extend(['', rev_3pr_adapter,'', '','', '','', ''])else:job.fileStore.readGlobalFile(r1_id, os.path.join(work_dir, ''))parameters.extend(['', '', ''])dockerCall(job=job, tool='',workDir=work_dir, parameters=parameters)if r1_id and r2_id:r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))r2_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))else:r1_cut_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))r2_cut_id = Nonereturn r1_cut_id, r2_cut_id", "docstring": "Adapter trimming for RNA-seq data\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str r1_id: FileStoreID of fastq read 1\n:param str r2_id: FileStoreID of fastq read 2 (if paired data)\n:param str fwd_3pr_adapter: Adapter sequence for the forward 3' adapter\n:param str rev_3pr_adapter: Adapter sequence for the reverse 3' adapter (second fastq pair)\n:return: R1 and R2 FileStoreIDs\n:rtype: tuple", "id": "f3306:m0"} {"signature": "def run_samblaster(job, sam):", "body": "work_dir = job.fileStore.getLocalTempDir()job.fileStore.readGlobalFile(sam, 
os.path.join(work_dir, ''))command = ['','', '','', '','']start_time = time.time()dockerCall(job=job, workDir=work_dir,parameters=command,tool='')end_time = time.time()_log_runtime(job, start_time, end_time, \"\")return job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))", "docstring": "Marks reads as PCR duplicates using SAMBLASTER\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str sam: FileStoreID for SAM file\n:return: FileStoreID for deduped SAM file\n:rtype: str", "id": "f3306:m10"} {"signature": "def run_indel_realignment(job, intervals, bam, bai, ref, ref_dict, fai, g1k, mills, unsafe=False):", "body": "inputs = {'': ref,'': fai,'': ref_dict,'': bam,'': bai,'': intervals,'': g1k,'': mills}work_dir = job.fileStore.getLocalTempDir()for name, file_store_id in inputs.iteritems():job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))parameters = ['', '','', '','', '','', '','', '','', '','', '','', str(), '', str(), '', '']if unsafe:parameters.extend(['', ''])docker_parameters = ['','', '','', ''.format(job.memory),'', ''.format(work_dir)]start_time = time.time()dockerCall(job=job, tool='',workDir=work_dir,parameters=parameters,dockerParameters=docker_parameters)end_time = time.time()_log_runtime(job, start_time, end_time, \"\")indel_bam = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))indel_bai = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))return indel_bam, indel_bai", "docstring": "Realigns BAM file at realignment target intervals\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str intervals: FileStoreID for INDEL realignment intervals file\n:param str bam: FileStoreID for BAM file\n:param str bai: FileStoreID for BAM index file\n:param str ref: FileStoreID for reference genome fasta file\n:param str ref_dict: FileStoreID for reference sequence dictionary file\n:param str fai: FileStoreID for reference fasta index file\n:param str g1k: FileStoreID for 1000 Genomes VCF file\n:param str mills: FileStoreID for Mills VCF file\n:param bool unsafe: If True, runs GATK in UNSAFE mode: \"-U ALLOW_SEQ_DICT_INCOMPATIBILITY\"\n:return: FileStoreIDs for realigned BAM and BAI files\n:rtype: tuple(str, str)", "id": "f3306:m16"} {"signature": "def picard_mark_duplicates(job, bam, bai, validation_stringency=''):", "body": "work_dir = job.fileStore.getLocalTempDir()job.fileStore.readGlobalFile(bam, os.path.join(work_dir, ''))job.fileStore.readGlobalFile(bai, os.path.join(work_dir, ''))command = ['','','','','','','' % validation_stringency.upper()]docker_parameters = ['','', '','', ''.format(job.memory),'', ''.format(work_dir)]start_time = time.time()dockerCall(job=job, workDir=work_dir,parameters=command,tool='',dockerParameters=docker_parameters)end_time = time.time()_log_runtime(job, start_time, end_time, \"\")bam = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))bai = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))return bam, bai", "docstring": "Runs Picard MarkDuplicates on a BAM file. 
Requires that the BAM file be coordinate sorted.\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str bam: FileStoreID for BAM file\n:param str bai: FileStoreID for BAM index file\n:param str validation_stringency: BAM file validation stringency, default is LENIENT\n:return: FileStoreIDs for BAM and BAI files\n:rtype: tuple", "id": "f3306:m12"} {"signature": "def run_rsem_postprocess(job, rsem_gene_id, rsem_isoform_id):", "body": "work_dir = job.fileStore.getLocalTempDir()genes = job.fileStore.readGlobalFile(rsem_gene_id, os.path.join(work_dir, ''))iso = job.fileStore.readGlobalFile(rsem_isoform_id, os.path.join(work_dir, ''))command = ['', '', '', '']dockerCall(job=job, tool='',parameters=command, workDir=work_dir)hugo_files = [os.path.join(work_dir, x) for x in ['', '']]tarball_files('', file_paths=[os.path.join(work_dir, x) for x in [genes, iso]], output_dir=work_dir)tarball_files('', file_paths=[os.path.join(work_dir, x) for x in hugo_files], output_dir=work_dir)rsem_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))hugo_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))return rsem_id, hugo_id", "docstring": "Parses RSEMs output to produce the separate .tab files (TPM, FPKM, counts) for both gene and isoform.\nThese are two-column files: Genes and Quantifications.\nHUGO files are also provided that have been mapped from Gencode/ENSEMBLE names.\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str rsem_gene_id: FileStoreID of rsem_gene_ids\n:param str rsem_isoform_id: FileStoreID of rsem_isoform_ids\n:return: FileStoreID from RSEM post process tarball\n:rytpe: str", "id": "f3308:m2"} {"signature": "def run_rsem(job, bam_id, rsem_ref_url, paired=True):", "body": "work_dir = job.fileStore.getLocalTempDir()download_url(job, url=rsem_ref_url, name='', work_dir=work_dir)subprocess.check_call(['', '', os.path.join(work_dir, ''), '', work_dir])os.remove(os.path.join(work_dir, ''))rsem_files = []for root, directories, files in os.walk(work_dir):rsem_files.extend([os.path.join(root, x) for x in files])ref_prefix = [os.path.basename(os.path.splitext(x)[]) for x in rsem_files if '' in x][]ref_folder = os.path.join('', os.listdir(work_dir)[]) if len(os.listdir(work_dir)) == else ''job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, ''))output_prefix = ''parameters = ['','','', str(job.cores),'', '','', '','', '','', '',os.path.join(ref_folder, ref_prefix),output_prefix]if paired:parameters = [''] + parametersdockerCall(job=job, tool='',parameters=parameters, workDir=work_dir)gene_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + ''))isoform_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, output_prefix + ''))return gene_id, isoform_id", "docstring": "RNA quantification with RSEM\n\n:param JobFunctionWrappingJob job: Passed automatically by Toil\n:param str bam_id: FileStoreID of transcriptome bam for quantification\n:param str rsem_ref_url: URL of RSEM reference (tarball)\n:param bool paired: If True, uses parameters for paired end data\n:return: FileStoreIDs for RSEM's gene and isoform output\n:rtype: str", "id": "f3308:m1"} {"signature": "def run_mutect(job, normal_bam, normal_bai, tumor_bam, tumor_bai, ref, ref_dict, fai, cosmic, dbsnp):", "body": "work_dir = job.fileStore.getLocalTempDir()file_ids = [normal_bam, normal_bai, tumor_bam, tumor_bai, ref, fai, ref_dict, cosmic, dbsnp]file_names = ['', '', '', '', '','', '', '', '']for file_store_id, name in zip(file_ids, 
file_names):job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))parameters = ['', '','', '','', '','', '','', '','', '','', str(), '', str(), '', '','', '','', '']dockerCall(job=job, workDir=work_dir, parameters=parameters,tool='')output_file_names = ['', '', '']output_file_paths = [os.path.join(work_dir, x) for x in output_file_names]tarball_files('', file_paths=output_file_paths, output_dir=work_dir)return job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))", "docstring": "Calls MuTect to perform variant analysis\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str normal_bam: Normal BAM FileStoreID\n:param str normal_bai: Normal BAM index FileStoreID\n:param str tumor_bam: Tumor BAM FileStoreID\n:param str tumor_bai: Tumor BAM Index FileStoreID\n:param str ref: Reference genome FileStoreID\n:param str ref_dict: Reference dictionary FileStoreID\n:param str fai: Reference index FileStoreID\n:param str cosmic: Cosmic VCF FileStoreID\n:param str dbsnp: DBSNP VCF FileStoreID\n:return: MuTect output (tarball) FileStoreID\n:rtype: str", "id": "f3312:m0"} {"signature": "def gatk_combine_variants(job, vcfs, ref_fasta, ref_fai, ref_dict, merge_option=''):", "body": "job.fileStore.logToMaster('')inputs = {'': ref_fasta,'': ref_fai,'': ref_dict}inputs.update(vcfs)work_dir = job.fileStore.getLocalTempDir()for name, file_store_id in inputs.iteritems():job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))command = ['', '','', '','', '','', merge_option]for uuid, vcf_id in vcfs.iteritems():command.extend(['', os.path.join('', uuid)])docker_parameters = ['', '', '','', ''.format(job.memory)]dockerCall(job=job, workDir=work_dir,parameters=command,tool='',dockerParameters=docker_parameters)return job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))", "docstring": "Merges VCF files using GATK CombineVariants\n\n:param JobFunctionWrappingJob job: Toil Job instance\n:param dict vcfs: Dictionary of VCF FileStoreIDs {sample identifier: FileStoreID}\n:param str ref_fasta: FileStoreID for reference genome fasta\n:param str ref_fai: FileStoreID for reference genome index file\n:param str ref_dict: FileStoreID for reference genome sequence dictionary file\n:param str merge_option: Value for --genotypemergeoption flag (Default: 'UNIQUIFY')\n 'UNIQUIFY': Multiple variants at a single site are merged into a\n single variant record.\n 'UNSORTED': Used to merge VCFs from the same sample\n:return: FileStoreID for merged VCF file\n:rtype: str", "id": "f3313:m4"} {"signature": "def run_oncotator(job, vcf_id, oncotator_db):", "body": "job.fileStore.logToMaster('')inputs = {'': vcf_id,'': oncotator_db}work_dir = job.fileStore.getLocalTempDir()for name, file_store_id in inputs.iteritems():inputs[name] = job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))if tarfile.is_tarfile(inputs['']):tar = tarfile.open(inputs[''])tar.extractall(path=work_dir)inputs[''] = tar.getmembers()[].nametar.close()command = ['', '','', '','', inputs[''],'','',''] docker_parameters = ['', '', '','', ''.format(job.memory)]dockerCall(job=job, workDir=work_dir,parameters=command,tool='',dockerParameters=docker_parameters)return job.fileStore.writeGlobalFile(os.path.join(work_dir, ''))", "docstring": "Uses Oncotator to add cancer relevant variant annotations to a VCF file. 
Oncotator can accept\nother genome builds, but the output VCF is based on hg19.\n\n:param JobFunctionWrappingJob job: passed automatically by Toil\n:param str vcf_id: FileStoreID for VCF file\n:param str oncotator_db: FileStoreID for Oncotator database\n:return: Annotated VCF FileStoreID\n:rtype: str", "id": "f3314:m1"} {"signature": "def copy_files(file_paths, output_dir):", "body": "__forall_files(file_paths, output_dir, shutil.copy)", "docstring": "Moves files from the working directory to the output directory.\n\n:param str output_dir: Output directory\n:param list[str] file_paths: Absolute file paths to move", "id": "f3315:m4"} {"signature": "def __start_datanode(self, job):", "body": "self.hdfsContainerID = dockerCheckOutput(job=job,defer=STOP,workDir=os.getcwd(),tool=\"\",dockerParameters=[\"\",\"\",\"\", \"\"],parameters=[self.masterIP])[:-]", "docstring": "Launches the Hadoop datanode.\n\n:param job: The underlying job.", "id": "f3325:c1:m2"} {"signature": "def flatten(x):", "body": "result = []for el in x:if hasattr(el, \"\") and not isinstance(el, basestring):result.extend(flatten(el))else:result.append(el)return result", "docstring": "Flattens a nested array into a single list\n\n:param list x: The nested list/tuple to be flattened.", "id": "f3326:m0"} {"signature": "def required_length(nmin, nmax):", "body": "class RequiredLength(argparse.Action):def __call__(self, parser, args, values, option_string=None):if not nmin <= len(values) <= nmax:msg = ''.format(f=self.dest, nmin=nmin, nmax=nmax)raise argparse.ArgumentTypeError(msg)setattr(args, self.dest, values)return RequiredLength", "docstring": "For use with argparse's action argument. Allows setting a range for nargs.\nExample: nargs='+', action=required_length(2, 3)\n\n:param int nmin: Minimum number of arguments\n:param int nmax: Maximum number of arguments\n:return: RequiredLength object", "id": "f3326:m3"} {"signature": "def partitions(l, partition_size):", "body": "for i in xrange(, len(l), partition_size):yield l[i:i + partition_size]", "docstring": ">>> list(partitions([], 10))\n[]\n>>> list(partitions([1,2,3,4,5], 1))\n[[1], [2], [3], [4], [5]]\n>>> list(partitions([1,2,3,4,5], 2))\n[[1, 2], [3, 4], [5]]\n>>> list(partitions([1,2,3,4,5], 5))\n[[1, 2, 3, 4, 5]]\n\n:param list l: List to be partitioned\n:param int partition_size: Size of partitions", "id": "f3326:m1"} {"signature": "def download_url(job, url, work_dir='', name=None, s3_key_path=None, cghub_key_path=None):", "body": "file_path = os.path.join(work_dir, name) if name else os.path.join(work_dir, os.path.basename(url))if cghub_key_path:_download_with_genetorrent(job, url, file_path, cghub_key_path)elif urlparse(url).scheme == '':_s3am_with_retry(job, num_cores=, file_path=file_path, s3_url=url, mode='', s3_key_path=s3_key_path)elif urlparse(url).scheme == '':shutil.copy(urlparse(url).path, file_path)else:subprocess.check_call(['', '', '', '', '', url, '', file_path])assert os.path.exists(file_path)return file_path", "docstring": "Downloads URL, can pass in file://, http://, s3://, or ftp://, gnos://cghub/analysisID, or gnos:///analysisID\nIf downloading S3 URLs, the S3AM binary must be on the PATH\n\n:param toil.job.Job job: Toil job that is calling this function\n:param str url: URL to download from\n:param str work_dir: Directory to download file to\n:param str name: Name of output file, if None, basename of URL is used\n:param str s3_key_path: Path to 32-byte encryption key if url points to S3 file that uses SSE-C\n:param str cghub_key_path: Path to cghub 
key used to download from CGHub.\n:return: Path to the downloaded file\n:rtype: str", "id": "f3327:m0"} {"signature": "def _get_config_path(self):", "body": "return '' % (os.getcwd(), self._name)", "docstring": "Returns the path of a pipeline config file, without regard for its existence.", "id": "f3328:c0:m5"} {"signature": "def _create_pipeline_command(self, args, workdir_path, config_path):", "body": "return ([self._name, '', os.path.join(workdir_path, ''),'', config_path,'', workdir_path, '', '']+ ([''] if args.restart else []))", "docstring": "Creates and returns a list that represents a command for running the pipeline.", "id": "f3328:c0:m9"} {"signature": "def _add_option(self, arg_parser, name, *args, **kwargs):", "body": "arg_parser.add_argument('' + name, *args, **kwargs)", "docstring": "Add an argument to the given arg_parser with the given name.\n\n:param argparse.ArgumentParser arg_parser:\n:param str name: The name of the option.", "id": "f3328:c0:m7"} {"signature": "def __new__(cls, ctx):", "body": "return cls.__run(cls, ctx)", "docstring": "Return on call class", "id": "f3335:c0:m0"} {"signature": "def __init__(self, x=, y=, z=):", "body": "self.x = xself.y = yself.z = z", "docstring": "Create a new Vertex representing the position (x, y, z).", "id": "f3342:c0:m0"} {"signature": "def sensible_axes(self):", "body": "axes = [, , ]if self.v0.x == self.v1.x == self.v2.x:axes[] = if self.v0.y == self.v1.y == self.v2.y:axes[] = if self.v0.z == self.v1.z == self.v2.z:axes[] = u = [, , ]for i in range():if axes[i] == :u[i] = axes[i] = breakv = [, , ]for i in range():if axes[i] == :v[i] = -breakuaxis = Axis(u[], u[], u[])vaxis = Axis(v[], v[], v[])return (uaxis, vaxis)", "docstring": "Returns a sensible uaxis and vaxis for this plane.", "id": "f3342:c5:m2"} {"signature": "def top(self):", "body": "return self.brush.children[]", "docstring": "Returns the top Side of the Block.", "id": "f3345:c0:m4"} {"signature": "def render(self, template, **kwargs):", "body": "return Template(template).render(RequestContext({}, kwargs)).strip()", "docstring": "Return the rendering of a given template", "id": "f3352:c0:m4"} {"signature": "def add_direction(value, arg=u\"\"):", "body": "if arg == u'':directions = (u'', u'')elif arg == u'':directions = (u'', u'')elif arg == u'':directions = (u'', u'')else:raise template.TemplateSyntaxError('')parts = value.rsplit('', )if not len(parts):return valueelif len(parts) == :return value + directions[translation.get_language_bidi()]else:return ''.join((parts[]+directions[translation.get_language_bidi()],parts[]))", "docstring": "Adds direction to the element\n\n :arguments:\n arg\n * rtl_only: Add the direction only in case of a\n right-to-left language (default)\n * both: add the direction in both case\n * ltr_only: Add the direction only in case of a\n left-to-right language\n\n {{image_name|add_direction}} when image_name is 'start_arrow.png'\n results in 'start_arrow_rtl.png' in case of RTL language, and\n 'start_arrow.png' or 'start_arrow_ltr.png' depends on `arg` value.", "id": "f3354:m0"} {"signature": "def accuracy(output, target, topk=(,)):", "body": "with torch.no_grad():maxk = max(topk)batch_size = target.size()_, pred = output.topk(maxk, , True, True)pred = pred.t()correct = pred.eq(target[None])res = []for k in topk:correct_k = correct[:k].flatten().sum(dtype=torch.float32)res.append(correct_k * ( / batch_size))return res", "docstring": "Computes the accuracy over the k top predictions for the specified values of k", "id": "f3361:m0"} 
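The record above (f3361:m0) describes a top-k accuracy helper whose body has had its constants stripped in this corpus. The following is a minimal, self-contained sketch of that pattern, not the original implementation: it assumes PyTorch is available, and the tensor shapes, the filled-in constants (dim=1, the 100.0 scaling), the helper name topk_accuracy, and the example inputs are illustrative assumptions only.

# Sketch of the top-k accuracy pattern from record f3361:m0 (assumed details marked).
import torch

def topk_accuracy(output, target, topk=(1,)):
    # output: [batch, n_classes] logits; target: [batch] integer class labels.
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Indices of the maxk highest-scoring classes per sample: [batch, maxk].
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()                     # [maxk, batch]
        correct = pred.eq(target[None])     # broadcast-compare against targets
        res = []
        for k in topk:
            # Count samples whose label appears among the top-k predictions.
            correct_k = correct[:k].flatten().sum(dtype=torch.float32)
            res.append(correct_k * (100.0 / batch_size))
        return res

# Illustrative usage (random data, assumed shapes).
logits = torch.randn(8, 10)                 # 8 samples, 10 classes
labels = torch.randint(0, 10, (8,))
top1, top5 = topk_accuracy(logits, labels, topk=(1, 5))

The broadcasted eq over the transposed prediction matrix lets a single comparison serve every k in topk; only the number of rows summed changes per entry.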
{"signature": "def __call__(self, img):", "body": "i, j, h, w = self.get_params(img, self.scale, self.ratio)return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)", "docstring": "Args:\n img (PIL Image): Image to be cropped and resized.\n\nReturns:\n PIL Image: Randomly cropped and resized image.", "id": "f3371:c17:m2"} {"signature": "@staticmethoddef get_params(degrees):", "body": "angle = random.uniform(degrees[], degrees[])return angle", "docstring": "Get parameters for ``rotate`` for a random rotation.\n\n Returns:\n sequence: params to be passed to ``rotate`` for random rotation.", "id": "f3371:c23:m1"} {"signature": "def __call__(self, img):", "body": "return F.to_grayscale(img, num_output_channels=self.num_output_channels)", "docstring": "Args:\n img (PIL Image): Image to be converted to grayscale.\n\nReturns:\n PIL Image: Randomly grayscaled image.", "id": "f3371:c25:m1"} {"signature": "def __call__(self, pic):", "body": "return F.to_pil_image(pic, self.mode)", "docstring": "Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n\nReturns:\n PIL Image: Image converted to PIL Image.", "id": "f3371:c2:m1"} {"signature": "def __call__(self, img):", "body": "if random.random() < self.p:return F.hflip(img)return img", "docstring": "Args:\n img (PIL Image): Image to be flipped.\n\nReturns:\n PIL Image: Randomly flipped image.", "id": "f3371:c14:m1"} {"signature": "@staticmethoddef get_params(width, height, distortion_scale):", "body": "half_height = int(height / )half_width = int(width / )topleft = (random.randint(, int(distortion_scale * half_width)),random.randint(, int(distortion_scale * half_height)))topright = (random.randint(width - int(distortion_scale * half_width) - , width - ),random.randint(, int(distortion_scale * half_height)))botright = (random.randint(width - int(distortion_scale * half_width) - , width - ),random.randint(height - int(distortion_scale * half_height) - , height - ))botleft = (random.randint(, int(distortion_scale * half_width)),random.randint(height - int(distortion_scale * half_height) - , height - ))startpoints = [(, ), (width - , ), (width - , height - ), (, height - )]endpoints = [topleft, topright, botright, botleft]return startpoints, endpoints", "docstring": "Get parameters for ``perspective`` for a random perspective transform.\n\n Args:\n width : width of the image.\n height : height of the image.\n\n Returns:\n List containing [top-left, top-right, bottom-right, bottom-left] of the orignal image,\n List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.", "id": "f3371:c16:m2"} {"signature": "@staticmethoddef get_params(brightness, contrast, saturation, hue):", "body": "transforms = []if brightness is not None:brightness_factor = random.uniform(brightness[], brightness[])transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))if contrast is not None:contrast_factor = random.uniform(contrast[], contrast[])transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))if saturation is not None:saturation_factor = random.uniform(saturation[], saturation[])transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))if hue is not None:hue_factor = random.uniform(hue[], hue[])transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))random.shuffle(transforms)transform = Compose(transforms)return transform", "docstring": "Get a randomized transform to be applied on image.\n\n Arguments are same as that of 
__init__.\n\n Returns:\n Transform which randomly adjusts brightness, contrast and\n saturation in a random order.", "id": "f3371:c22:m2"} {"signature": "def resize(img, size, interpolation=Image.BILINEAR):", "body": "if not _is_pil_image(img):raise TypeError(''.format(type(img)))if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == )):raise TypeError(''.format(size))if isinstance(size, int):w, h = img.sizeif (w <= h and w == size) or (h <= w and h == size):return imgif w < h:ow = sizeoh = int(size * h / w)return img.resize((ow, oh), interpolation)else:oh = sizeow = int(size * w / h)return img.resize((ow, oh), interpolation)else:return img.resize(size[::-], interpolation)", "docstring": "r\"\"\"Resize the input PIL Image to the given size.\n\n Args:\n img (PIL Image): Image to be resized.\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), the output size will be matched to this. If size is an int,\n the smaller edge of the image will be matched to this number maintaing\n the aspect ratio. i.e, if height > width, then image will be rescaled to\n :math:`\\left(\\text{size} \\times \\frac{\\text{height}}{\\text{width}}, \\text{size}\\right)`\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n\n Returns:\n PIL Image: Resized image.", "id": "f3373:m6"} {"signature": "def pad(img, padding, fill=, padding_mode=''):", "body": "if not _is_pil_image(img):raise TypeError(''.format(type(img)))if not isinstance(padding, (numbers.Number, tuple)):raise TypeError('')if not isinstance(fill, (numbers.Number, str, tuple)):raise TypeError('')if not isinstance(padding_mode, str):raise TypeError('')if isinstance(padding, Sequence) and len(padding) not in [, ]:raise ValueError(\"\" +\"\".format(len(padding)))assert padding_mode in ['', '', '', ''],''if padding_mode == '':if img.mode == '':palette = img.getpalette()image = ImageOps.expand(img, border=padding, fill=fill)image.putpalette(palette)return imagereturn ImageOps.expand(img, border=padding, fill=fill)else:if isinstance(padding, int):pad_left = pad_right = pad_top = pad_bottom = paddingif isinstance(padding, Sequence) and len(padding) == :pad_left = pad_right = padding[]pad_top = pad_bottom = padding[]if isinstance(padding, Sequence) and len(padding) == :pad_left = padding[]pad_top = padding[]pad_right = padding[]pad_bottom = padding[]if img.mode == '':palette = img.getpalette()img = np.asarray(img)img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)img = Image.fromarray(img)img.putpalette(palette)return imgimg = np.asarray(img)if len(img.shape) == :img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (, )), padding_mode)if len(img.shape) == :img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)return Image.fromarray(img)", "docstring": "r\"\"\"Pad the given PIL Image on all sides with specified padding mode and fill value.\n\n Args:\n img (PIL Image): Image to be padded.\n padding (int or tuple): Padding on each border. If a single int is provided this\n is used to pad all borders. If tuple of length 2 is provided this is the padding\n on left/right and top/bottom respectively. If a tuple of length 4 is provided\n this is the padding for the left, top, right and bottom borders\n respectively.\n fill: Pixel fill value for constant fill. Default is 0. 
If a tuple of\n length 3, it is used to fill R, G, B channels respectively.\n This value is only used when the padding_mode is constant\n padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.\n\n - constant: pads with a constant value, this value is specified with fill\n\n - edge: pads with the last value on the edge of the image\n\n - reflect: pads with reflection of image (without repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode\n will result in [3, 2, 1, 2, 3, 4, 3, 2]\n\n - symmetric: pads with reflection of image (repeating the last value on the edge)\n\n padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode\n will result in [2, 1, 1, 2, 3, 4, 4, 3]\n\n Returns:\n PIL Image: Padded image.", "id": "f3373:m8"} {"signature": "def five_crop(img, size):", "body": "if isinstance(size, numbers.Number):size = (int(size), int(size))else:assert len(size) == , \"\"w, h = img.sizecrop_h, crop_w = sizeif crop_w > w or crop_h > h:raise ValueError(\"\".format(size,(h, w)))tl = img.crop((, , crop_w, crop_h))tr = img.crop((w - crop_w, , w, crop_h))bl = img.crop((, h - crop_h, crop_w, h))br = img.crop((w - crop_w, h - crop_h, w, h))center = center_crop(img, (crop_h, crop_w))return (tl, tr, bl, br, center)", "docstring": "Crop the given PIL Image into four corners and the central crop.\n\n .. Note::\n This transform returns a tuple of images and there may be a\n mismatch in the number of inputs and targets your ``Dataset`` returns.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n\n Returns:\n tuple: tuple (tl, tr, bl, br, center)\n Corresponding top left, top right, bottom left, bottom right and center crop.", "id": "f3373:m16"} {"signature": "def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):", "body": "assert _is_pil_image(img), ''img = crop(img, i, j, h, w)img = resize(img, size, interpolation)return img", "docstring": "Crop the given PIL Image and resize it to desired size.\n\n Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.\n\n Args:\n img (PIL Image): Image to be cropped.\n i (int): i in (i,j) i.e coordinates of the upper left corner\n j (int): j in (i,j) i.e coordinates of the upper left corner\n h (int): Height of the cropped image.\n w (int): Width of the cropped image.\n size (sequence or int): Desired output size. Same semantics as ``resize``.\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``.\n Returns:\n PIL Image: Cropped image.", "id": "f3373:m11"} {"signature": "def adjust_saturation(img, saturation_factor):", "body": "if not _is_pil_image(img):raise TypeError(''.format(type(img)))enhancer = ImageEnhance.Color(img)img = enhancer.enhance(saturation_factor)return img", "docstring": "Adjust color saturation of an image.\n\n Args:\n img (PIL Image): PIL Image to be adjusted.\n saturation_factor (float): How much to adjust the saturation. 
0 will\n give a black and white image, 1 will give the original image while\n 2 will enhance the saturation by a factor of 2.\n\n Returns:\n PIL Image: Saturation adjusted image.", "id": "f3373:m20"} {"signature": "def to_pil_image(pic, mode=None):", "body": "if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):raise TypeError(''.format(type(pic)))elif isinstance(pic, torch.Tensor):if pic.ndimension() not in {, }:raise ValueError(''.format(pic.ndimension()))elif pic.ndimension() == :pic = pic.unsqueeze()elif isinstance(pic, np.ndarray):if pic.ndim not in {, }:raise ValueError(''.format(pic.ndim))elif pic.ndim == :pic = np.expand_dims(pic, )npimg = picif isinstance(pic, torch.FloatTensor):pic = pic.mul().byte()if isinstance(pic, torch.Tensor):npimg = np.transpose(pic.numpy(), (, , ))if not isinstance(npimg, np.ndarray):raise TypeError('' +''.format(type(npimg)))if npimg.shape[] == :expected_mode = Nonenpimg = npimg[:, :, ]if npimg.dtype == np.uint8:expected_mode = ''elif npimg.dtype == np.int16:expected_mode = ''elif npimg.dtype == np.int32:expected_mode = ''elif npimg.dtype == np.float32:expected_mode = ''if mode is not None and mode != expected_mode:raise ValueError(\"\".format(mode, np.dtype, expected_mode))mode = expected_modeelif npimg.shape[] == :permitted_2_channel_modes = ['']if mode is not None and mode not in permitted_2_channel_modes:raise ValueError(\"\".format(permitted_2_channel_modes))if mode is None and npimg.dtype == np.uint8:mode = ''elif npimg.shape[] == :permitted_4_channel_modes = ['', '', '']if mode is not None and mode not in permitted_4_channel_modes:raise ValueError(\"\".format(permitted_4_channel_modes))if mode is None and npimg.dtype == np.uint8:mode = ''else:permitted_3_channel_modes = ['', '', '']if mode is not None and mode not in permitted_3_channel_modes:raise ValueError(\"\".format(permitted_3_channel_modes))if mode is None and npimg.dtype == np.uint8:mode = ''if mode is None:raise TypeError(''.format(npimg.dtype))return Image.fromarray(npimg, mode=mode)", "docstring": "Convert a tensor or an ndarray to PIL Image.\n\n See :class:`~torchvision.transforms.ToPILImage` for more details.\n\n Args:\n pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.\n mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).\n\n .. 
_PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes\n\n Returns:\n PIL Image: Image converted to PIL Image.", "id": "f3373:m4"} {"signature": "def to_grayscale(img, num_output_channels=):", "body": "if not _is_pil_image(img):raise TypeError(''.format(type(img)))if num_output_channels == :img = img.convert('')elif num_output_channels == :img = img.convert('')np_img = np.array(img, dtype=np.uint8)np_img = np.dstack([np_img, np_img, np_img])img = Image.fromarray(np_img, '')else:raise ValueError('')return img", "docstring": "Convert image to grayscale version of image.\n\n Args:\n img (PIL Image): Image to be converted to grayscale.\n\n Returns:\n PIL Image: Grayscale version of the image.\n if num_output_channels = 1 : returned image is single channel\n\n if num_output_channels = 3 : returned image is 3 channel with r = g = b", "id": "f3373:m26"} {"signature": "def conv3x3(in_planes, out_planes, stride=, groups=, dilation=):", "body": "return nn.Conv2d(in_planes, out_planes, kernel_size=, stride=stride,padding=dilation, groups=groups, bias=False, dilation=dilation)", "docstring": "3x3 convolution with padding", "id": "f3378:m0"} {"signature": "def alexnet(pretrained=False, **kwargs):", "body": "model = AlexNet(**kwargs)if pretrained:model.load_state_dict(model_zoo.load_url(model_urls['']))return model", "docstring": "r\"\"\"AlexNet model architecture from the\n `\"One weird trick...\" `_ paper.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet", "id": "f3380:m0"} {"signature": "def vgg16(pretrained=False, **kwargs):", "body": "if pretrained:kwargs[''] = Falsemodel = VGG(make_layers(cfg['']), **kwargs)if pretrained:model.load_state_dict(model_zoo.load_url(model_urls['']))return model", "docstring": "VGG 16-layer model (configuration \"D\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet", "id": "f3381:m5"} {"signature": "def vgg13_bn(pretrained=False, **kwargs):", "body": "if pretrained:kwargs[''] = Falsemodel = VGG(make_layers(cfg[''], batch_norm=True), **kwargs)if pretrained:model.load_state_dict(model_zoo.load_url(model_urls['']))return model", "docstring": "VGG 13-layer model (configuration \"B\") with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet", "id": "f3381:m4"} {"signature": "def vgg13(pretrained=False, **kwargs):", "body": "if pretrained:kwargs[''] = Falsemodel = VGG(make_layers(cfg['']), **kwargs)if pretrained:model.load_state_dict(model_zoo.load_url(model_urls['']))return model", "docstring": "VGG 13-layer model (configuration \"B\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet", "id": "f3381:m3"} {"signature": "def densenet161(pretrained=False, **kwargs):", "body": "model = DenseNet(num_init_features=, growth_rate=, block_config=(, , , ),**kwargs)if pretrained:_load_state_dict(model, model_urls[''])return model", "docstring": "r\"\"\"Densenet-161 model from\n `\"Densely Connected Convolutional Networks\" `_\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet", "id": "f3382:m4"} {"signature": "def read_matches_files(data_dir, matches_file):", "body": "matches = []with open(os.path.join(data_dir, matches_file), '') as f:for line in f:line_split = line.split()matches.append([int(line_split[]), int(line_split[]),int(line_split[] == line_split[])])return torch.LongTensor(matches)", "docstring": "Return a Tensor containing the ground truth matches\n Read the file and 
keep only 3D point ID.\n Matches are represented with a 1, non matches with a 0.", "id": "f3391:m2"} {"signature": "def __getitem__(self, index):", "body": "if self.train:data = self.data[index]if self.transform is not None:data = self.transform(data)return datam = self.matches[index]data1, data2 = self.data[m[]], self.data[m[]]if self.transform is not None:data1 = self.transform(data1)data2 = self.transform(data2)return data1, data2, m[]", "docstring": "Args:\n index (int): Index\n\nReturns:\n tuple: (data1, data2, matches)", "id": "f3391:c0:m1"} {"signature": "def read_info_file(data_dir, info_file):", "body": "labels = []with open(os.path.join(data_dir, info_file), '') as f:labels = [int(line.split()[]) for line in f]return torch.LongTensor(labels)", "docstring": "Return a Tensor containing the list of labels\n Read the file and keep only the ID of the 3D point.", "id": "f3391:m1"} {"signature": "def __getitem__(self, index):", "body": "if self.labels is not None:img, target = self.data[index], int(self.labels[index])else:img, target = self.data[index], Noneimg = Image.fromarray(np.transpose(img, (, , )))if self.transform is not None:img = self.transform(img)if self.target_transform is not None:target = self.target_transform(target)return img, target", "docstring": "Args:\n index (int): Index\n\nReturns:\n tuple: (image, target) where target is index of the target class.", "id": "f3393:c0:m1"} {"signature": "def _check_integrity(self):", "body": "root = self.rootfpath = os.path.join(root, self.filename)if not check_integrity(fpath, self.md5_checksum):return Falsereturn True", "docstring": "Check the md5 checksum of the downloaded tarball.", "id": "f3399:c0:m3"} {"signature": "def __len__(self):", "body": "return len(self.photos)", "docstring": "The number of photos in the dataset.", "id": "f3399:c0:m2"} {"signature": "def download(self):", "body": "import tarfileif self._check_integrity():print('')returndownload_url(self.url, self.root, self.filename, self.md5_checksum)with tarfile.open(os.path.join(self.root, self.filename), '') as tar:tar.extractall(path=self.root)with open(os.path.join(self.root, '', '')) as fh:for line in fh:url = line.rstrip()try:download_url(url, os.path.join(self.root, ''))except OSError:pass", "docstring": "Download and extract the tarball, and download each individual photo.", "id": "f3399:c0:m4"} {"signature": "def list_files(root, suffix, prefix=False):", "body": "root = os.path.expanduser(root)files = list(filter(lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix),os.listdir(root)))if prefix is True:files = [os.path.join(root, d) for d in files]return files", "docstring": "List all files ending with a suffix at a given root\n\n Args:\n root (str): Path to directory whose folders need to be listed\n suffix (str or tuple): Suffix of the files to match, e.g. 
'.png' or ('.jpg', '.png').\n It uses the Python \"str.endswith\" method and is passed directly\n prefix (bool, optional): If true, prepends the path to each result, otherwise\n only returns the name of the files found", "id": "f3400:m7"} {"signature": "def list_dir(root, prefix=False):", "body": "root = os.path.expanduser(root)directories = list(filter(lambda p: os.path.isdir(os.path.join(root, p)),os.listdir(root)))if prefix is True:directories = [os.path.join(root, d) for d in directories]return directories", "docstring": "List all directories at a given root\n\n Args:\n root (str): Path to directory whose folders need to be listed\n prefix (bool, optional): If true, prepends the path to each result, otherwise\n only returns the name of the directories found", "id": "f3400:m6"} {"signature": "def download_file_from_google_drive(file_id, root, filename=None, md5=None):", "body": "import requestsurl = \"\"root = os.path.expanduser(root)if not filename:filename = file_idfpath = os.path.join(root, filename)makedir_exist_ok(root)if os.path.isfile(fpath) and check_integrity(fpath, md5):print('' + fpath)else:session = requests.Session()response = session.get(url, params={'': file_id}, stream=True)token = _get_confirm_token(response)if token:params = {'': file_id, '': token}response = session.get(url, params=params, stream=True)_save_response_content(response, fpath)", "docstring": "Download a Google Drive file from and place it in root.\n\n Args:\n file_id (str): id of file to be downloaded\n root (str): Directory to place downloaded file in\n filename (str, optional): Name to save the file under. If None, use the id of the file.\n md5 (str, optional): MD5 checksum of the download. If None, do not check", "id": "f3400:m8"} {"signature": "def download_url(url, root, filename=None, md5=None):", "body": "from six.moves import urllibroot = os.path.expanduser(root)if not filename:filename = os.path.basename(url)fpath = os.path.join(root, filename)makedir_exist_ok(root)if os.path.isfile(fpath) and check_integrity(fpath, md5):print('' + fpath)else:try:print('' + url + '' + fpath)urllib.request.urlretrieve(url, fpath,reporthook=gen_bar_updater())except OSError:if url[:] == '':url = url.replace('', '')print('''' + url + '' + fpath)urllib.request.urlretrieve(url, fpath,reporthook=gen_bar_updater())", "docstring": "Download a file from a url and place it in root.\n\n Args:\n url (str): URL to download file from\n root (str): Directory to place downloaded file in\n filename (str, optional): Name to save the file under. If None, use the basename of the URL\n md5 (str, optional): MD5 checksum of the download. 
If None, do not check", "id": "f3400:m5"} {"signature": "def __getitem__(self, index):", "body": "path, target = self.samples[index]sample = self.loader(path)if self.transform is not None:sample = self.transform(sample)if self.target_transform is not None:target = self.target_transform(target)return sample, target", "docstring": "Args:\n index (int): Index\n\nReturns:\n tuple: (sample, target) where target is class_index of the target class.", "id": "f3402:c0:m2"} {"signature": "@classmethoddef from_str(cls, human_readable_str, decimal=False, bits=False):", "body": "divisor = if decimal else num = []c = \"\"for c in human_readable_str:if c not in cls.digits:breaknum.append(c)num = \"\".join(num)try:num = int(num)except ValueError:num = float(num)if bits:num /= return cls(round(num * divisor ** cls.key[c.lower()]))", "docstring": "attempt to parse a size in bytes from a human-readable string.", "id": "f3408:c2:m3"} {"signature": "def deepupdate(mapping: abc.MutableMapping, other: abc.Mapping, listextend=False):", "body": "def inner(other, previouskeys):\"\"\"\"\"\"for key, value in other.items():if isinstance(value, abc.Mapping):inner(value, (*previouskeys, key))else:node = mappingfor previouskey in previouskeys:node = node.setdefault(previouskey, {})target = node.get(key)if (listextendand isinstance(target, abc.MutableSequence)and isinstance(value, abc.Sequence)):target.extend(value)else:node[key] = valueinner(other, ())", "docstring": "update one dictionary from another recursively. Only individual\n values will be overwritten--not entire branches of nested\n dictionaries.", "id": "f3408:m6"} {"signature": "@register.simple_tag@silence_without_namespacedef project_home_url(*args):", "body": "url = home_url()if url:return url", "docstring": "A template tag to return the project's home URL.\n\n PROJECT_HOME_NAMESPACE must be defined in settings.\n For example:\n PROJECT_HOME_NAMESPACE = 'project_name:index_view'\n\n Usage Example:\n {% load project_home_tags %}\n\n Home", "id": "f3411:m2"} {"signature": "def _refresh_access_token(self):", "body": "resource = \"\"fields = dict(grant_type=\"\", refresh_token=self.refresh_token,client_id=self.auth.client_id, client_secret=self.auth.client_secret,format=\"\")status, data = self._handle_response(\"\", resource, fields=fields, refresh_access_token=False)if \"\" in data:self.access_token = data[\"\"]if callable(self.access_token_refreshed_callback):self.access_token_refreshed_callback(self.access_token)return Truereturn False", "docstring": "If the client application has a refresh token, it can use it to send a request for a new access token. \n\nTo ask for a new access token, the client application should send a POST request to https://login.instance_name/services/oauth2/token with the following query parameters:\n\n grant_type: Value must be refresh_token for this flow.\n refresh_token: The refresh token the client application already received. \n client_id: Consumer key from the remote access application definition.\n client_secret: Consumer secret from the remote access application definition.\n format: Expected return format. This parameter is optional. The default is json. 
Values are:\n\n * urlencoded\n * json\n * xml\n\ne.g.\n\n $ curl -i --form grant_type=refresh_token \\\n --form refresh_token= \\\n --form client_id= \\\n --form client_secret= \\\n --form format=json \\\n https://na1.salesforce.com/services/oauth2/token", "id": "f3414:c1:m6"} {"signature": "def _put_information(self):", "body": "self.session._add_object()self.session._out('')self.session._out('' + self._text_to_string(''))if self.title:self.session._out('' + self._text_to_string(self.title))if self.subject:self.session._out('' + self._text_to_string(self.subject))if self.author:self.session._out('' + self._text_to_string(self.author))if self.keywords:self.session._out('' +self._text_to_string(self.keywords))if self.creator:self.session._out('' + self._text_to_string(self.creator))self.session._out('' + self._text_to_string('' + datetime.now().strftime('')))self.session._out('')self.session._out('')", "docstring": "PDF Information object.", "id": "f3453:c0:m12"} {"signature": "def _put_trailer(self):", "body": "startxref = len(self.session.buffer)self._put_cross_reference()md5 = hashlib.md5()md5.update(datetime.now().strftime(''))try:md5.update(self.filepath)except TypeError:passif self.title:md5.update(self.title)if self.subject:md5.update(self.subject)if self.author:md5.update(self.author)if self.keywords:md5.update(self.keywords)if self.creator:md5.update(self.creator)objnum = len(self.session.objects)self.session._out('')self.session._out('')self.session._out('' % objnum)self.session._out('' % (objnum - ))self.session._out('' % (objnum - ))self.session._out('' % (md5.hexdigest(),md5.hexdigest()))self.session._out('')self.session._out('')self.session._out(startxref)self.session._out('')", "docstring": "Final Trailer calculations, and end-of-file\n reference.", "id": "f3453:c0:m15"} {"signature": "def _put_resource_dict(self):", "body": "self.session._add_object()self.session._out('')self.session._out('')self.session._out('')for font in self.document.fonts:self.session._out('' % (font.index, font.number))self.session._out('')if self.document.images:self.session._out('')for image in self.document.images:self.session._out('' % (image.index, image.number))self.session._out('')self.session._out('')self.session._out('')", "docstring": "Creates PDF reference to resource objects.", "id": "f3453:c0:m11"} {"signature": "def set_display_mode(self, zoom='', layout=''):", "body": "self.zoom_options = [\"\", \"\", \"\", \"\"]self.layout_options = [\"\", \"\", \"\", \"\"]if zoom in self.zoom_options or (isinstance(zoom, int) and < zoom <= ):self.zoom_mode = zoomelse:raise Exception('' + zoom)if layout in self.layout_options:self.layout_mode = layoutelse:raise Exception('' + layout)", "docstring": "Set the default viewing options.", "id": "f3453:c0:m4"} {"signature": "def add_page(self, page=None):", "body": "if page is None:self.page = PDFPage(self.orientation_default, self.layout_default, self.margins)else:self.page = pageself.page._set_index(len(self.pages))self.pages.append(self.page)currentfont = self.fontself.set_font(font=currentfont)self.session._reset_colors()", "docstring": "May generate and add a PDFPage separately, or use this to generate\n a default page.", "id": "f3454:c0:m8"} {"signature": "def get_page(self):", "body": "return self.page", "docstring": "Returns reference to current page object.", "id": "f3454:c0:m9"} {"signature": "def _get_orientation_changes(self):", "body": "self.orientation_changes = []for page in self.pages:if page.orientation_change is 
True:self.orientation_changes.append(page.index)else:passreturn self.orientation_changes", "docstring": "Returns a list of the pages that have\n orientation changes.", "id": "f3454:c0:m64"} {"signature": "def _set_index(self, index=):", "body": "self.index = index", "docstring": "Image number", "id": "f3463:c0:m2"} {"signature": "@propertydef x_left(self):", "body": "return self.xmax - self.x", "docstring": "Space remaining on line.", "id": "f3467:c0:m11"} {"signature": "def __init__(self, x=, y=, boundary_flag=False):", "body": "self._x = self._y = if boundary_flag is True:self.set_bounds(xmin=x, ymin=y)else:self.set_bounds()self.set_deltas()self.x = xself.y = y", "docstring": "Cursor object. Initialize with placement of cursor.\n This cursor places the origin (0,) at the upper left hand\n side of the page. X increases horizontally, and y increases\n vertically down the page. Note: This is different from the way\n Adobe sets the origin. They place the origin at the lower\n left hand corner, and y increases from the bottom to the\n top of the page. I feel that their way required more explanation,\n and was less intuitive. Therefore, I have provided the y_prime\n property for use in outputting the cursor data at document\n generation.", "id": "f3467:c0:m0"} {"signature": "def _set_style(self, style=None):", "body": "if style is None:self.style = ''self.underline = Falseelif self.family == ('' or ''):self.style = ''self.underline = Falseself.style = style.upper()if '' in self.style or self.style == '':self.underline = Trueelse:self.underline = False", "docstring": "Style should be a string, containing the letters 'B' for bold,\n 'U' for underline, or 'I' for italic, or should be '', for no style.\n Symbol will not be underlined. The underline style can further be\n modified by specifying the underline thickness and position.", "id": "f3469:c0:m4"} {"signature": "def set_page_size(self, layout):", "body": "self.layout = layout.lower()if self.layout in self.layout_dict:self.page_size = self.layout_dict[self.layout]else:dimensions = self.layout.split('')if len(dimensions) == :self.page_size = (float(dimensions[]) * , float(dimensions[]) * )else:raise IndexError(\"\" % len(dimensions))", "docstring": "Valid choices: 'a3, 'a4', 'a5', 'letter', 'legal', '11x17'.", "id": "f3476:c0:m2"} {"signature": "def _compress(self):", "body": "self.buffer = compress(self.buffer)", "docstring": "Uses zlib to compress page buffers. 
Compression\n option is enabled through PDFLite object's\n setCompression method.", "id": "f3476:c0:m5"} {"signature": "def __init__(self, session, page):", "body": "self.session = sessionself.page = pageself._currentMatrix = (, , , , , )self._textMatrix = (, , , , , )", "docstring": "Constructor", "id": "f3485:c0:m0"} {"signature": "def _draw(self):", "body": "self._compile()self.rows[]._advance_first_row()self._set_borders()self._draw_fill()self._draw_borders()self._draw_text()self._set_final_cursor()", "docstring": "Don't use this, use document.draw_table", "id": "f3489:c0:m10"} {"signature": "def gzip(filename):", "body": "retcode, output = sh('' % filename)new_filename = filename+''return (retcode, output, new_filename)", "docstring": "Gzip a file\n returns a 3-tuple with returncode (integer), terminal output (string)\n and the new filename.", "id": "f3493:m2"} {"signature": "def chmod(path, mode, recursive=True):", "body": "if recursive:cmd = '' % (mode, path)else:cmd = '' % (mode, path)return sh(cmd)", "docstring": "alternative to os.", "id": "f3493:m5"} {"signature": "def db_list(username=None, password=None, host=None, port=None):", "body": "conn = _connection(username=username, password=password, host=host, port=port)cur = conn.cursor()cur.execute('')rows = cur.fetchall()conn.close()result = []for row in rows:result.append(row[])return result", "docstring": "returns a list of all databases on this server", "id": "f3494:m2"} {"signature": "def get_policy(self, id):", "body": "return self.request(\"\", id, method=\"\").json()", "docstring": "Get a spacific policy.\n\n https://www.nomadproject.io/api/sentinel-policies.html\n\n returns: dict\n\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3524:c0:m6"} {"signature": "def plan_job(self, id, job, diff=False, policy_override=False):", "body": "json_dict = {}json_dict.update(job)json_dict.setdefault('', diff)json_dict.setdefault('', policy_override)return self.request(id, \"\", json=json_dict, method=\"\").json()", "docstring": "Invoke a dry-run of the scheduler for the job.\n\n https://www.nomadproject.io/docs/http/job.html\n\n arguments:\n - id\n - job, dict\n - diff, boolean\n - policy_override, boolean\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3528:c0:m15"} {"signature": "def get_allocations(self, id):", "body": "return self.request(id, \"\", method=\"\").json()", "docstring": "Query the allocations belonging to a single job.\n\n https://www.nomadproject.io/docs/http/job.html\n\n arguments:\n - id\n returns: list\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3528:c0:m8"} {"signature": "def get_summary(self, id):", "body": "return self.request(id, \"\", method=\"\").json()", "docstring": "Query the summary of a job.\n\n https://www.nomadproject.io/docs/http/job.html\n\n arguments:\n - id\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3528:c0:m12"} {"signature": "def periodic_job(self, id):", "body": "return self.request(id, \"\", \"\", method=\"\").json()", "docstring": "Forces a new instance of the periodic job. 
A new instance will be\n created even if it violates the job's prohibit_overlap settings.\n As such, this should be only used to immediately\n run a periodic job.\n\n https://www.nomadproject.io/docs/http/job.html\n\n arguments:\n - id\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3528:c0:m16"} {"signature": "def get_versions(self, id):", "body": "return self.request(id, \"\", method=\"\").json()", "docstring": "This endpoint reads information about all versions of a job.\n\n https://www.nomadproject.io/docs/http/job.html\n\n arguments:\n - id\n returns: list of dicts\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3528:c0:m7"} {"signature": "def stable_job(self, id, version, stable):", "body": "revert_json = {\"\": id,\"\": version,\"\": stable}return self.request(id, \"\", json=revert_json, method=\"\").json()", "docstring": "This endpoint sets the job's stability.\n\n https://www.nomadproject.io/docs/http/job.html\n\n arguments:\n - id\n - version, Specifies the job version to revert to.\n - stable, Specifies whether the job should be marked as stable or not.\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3528:c0:m19"} {"signature": "def stream(self, id, offset, origin, path=\"\"):", "body": "params = {\"\": path,\"\": offset,\"\": origin}return self.request(id, params=params, method=\"\").text", "docstring": "This endpoint streams the contents of a file in an allocation directory.\n\n https://www.nomadproject.io/api/client.html#stream-file\n\n arguments:\n - id: (str) allocation_id required\n - offset: (int) required\n - origin: (str) either start|end\n - path: (str) optional\n returns: (str) text\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.BadRequestNomadException", "id": "f3529:c4:m1"} {"signature": "def read_file(self, id=None, path=\"\"):", "body": "if id:return self.request(id, params={\"\": path}, method=\"\").textelse:return self.request(params={\"\": path}, method=\"\").text", "docstring": "Read contents of a file in an allocation directory.\n\n https://www.nomadproject.io/docs/http/client-fs-cat.html\n\n arguments:\n - id\n - path\n returns: (str) text\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3529:c2:m1"} {"signature": "def stream(self, id, task, type, follow=False, offset=, origin=\"\", plain=False):", "body": "params = {\"\": task,\"\": type,\"\": follow,\"\": offset,\"\": origin,\"\": plain}return self.request(id, params=params, method=\"\").text", "docstring": "This endpoint streams a task's stderr/stdout logs.\n\n https://www.nomadproject.io/api/client.html#stream-logs\n\n arguments:\n - id: (str) allocation_id required\n - task: (str) name of the task inside the allocation to stream logs from\n - type: (str) Specifies the stream to stream. Either \"stderr|stdout\"\n - follow: (bool) default false\n - offset: (int) default 0\n - origin: (str) either start|end, default \"start\"\n - plain: (bool) Return just the plain text without framing. 
default False\n returns: (str) text\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.BadRequestNomadException", "id": "f3529:c5:m1"} {"signature": "def read_allocation_stats(self, id):", "body": "return self.request(id, \"\", method=\"\").json()", "docstring": "Query the actual resources consumed by an allocation.\n\n https://www.nomadproject.io/api/client.html#read-allocation\n\n arguments:\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3529:c8:m1"} {"signature": "def garbage_collect(self, node_id=None):", "body": "self.request(params={\"\": node_id}, method=\"\")", "docstring": "This endpoint forces a garbage collection of all stopped allocations on a node.\n\n https://www.nomadproject.io/api/client.html#gc-all-allocation\n\n arguments:\n - node_id: (str) full allocation_id\n raises:\n - nomad.api.exceptions.BaseNomadException", "id": "f3529:c10:m1"} {"signature": "def list_files(self, id=None, path=\"\"):", "body": "if id:return self.request(id, params={\"\": path}, method=\"\").json()else:return self.request(params={\"\": path}, method=\"\").json()", "docstring": "List files in an allocation directory.\n\n https://www.nomadproject.io/docs/http/client-fs-ls.html\n\n arguments:\n - id\n - path \n returns: list\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3529:c1:m1"} {"signature": "def read_file_offset(self, id, offset, limit, path=\"\"):", "body": "params = {\"\": path,\"\": offset,\"\": limit}return self.request(id, params=params, method=\"\").text", "docstring": "Read contents of a file in an allocation directory.\n\n https://www.nomadproject.io/docs/http/client-fs-cat.html\n\n arguments:\n - id: (str) allocation_id required\n - offset: (int) required\n - limit: (int) required\n - path: (str) optional\n returns: (str) text\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.BadRequestNomadException", "id": "f3529:c3:m1"} {"signature": "def get_deployment_allocations(self, id):", "body": "return self.request(\"\", id, method=\"\").json()", "docstring": "This endpoint lists the allocations created or modified for the given deployment.\n\n https://www.nomadproject.io/docs/http/deployments.html\n\n arguments:\n - id\n returns: list of dicts\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3531:c0:m7"} {"signature": "def get_deployment(self, id):", "body": "return self.request(id, method=\"\").json()", "docstring": "This endpoint reads information about a specific deployment by ID.\n\n https://www.nomadproject.io/docs/http/deployments.html\n\n arguments:\n - id\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3531:c0:m6"} {"signature": "def deployment_allocation_health(self, id, healthy_allocations=list(), unhealthy_allocations=list()):", "body": "allocations = {\"\": healthy_allocations,\"\": unhealthy_allocations,\"\": id}return self.request(\"\", id, json=allocations, method=\"\").json()", "docstring": "This endpoint is used to set the health of an allocation that is in the deployment manually. In some use\n cases, automatic detection of allocation health may not be desired. As such those task groups can be marked\n with an upgrade policy that uses health_check = \"manual\". 
Those allocations must have their health marked\n manually using this endpoint. Marking an allocation as healthy will allow the rolling upgrade to proceed.\n Marking it as failed will cause the deployment to fail.\n\n https://www.nomadproject.io/docs/http/deployments.html\n\n arguments:\n - id\n - healthy_allocations, Specifies the set of allocation that should be marked as healthy.\n - unhealthy_allocations, Specifies the set of allocation that should be marked as unhealthy.\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3531:c0:m12"} {"signature": "def register_job(self, job):", "body": "return self.request(json=job, method=\"\").json()", "docstring": "Register a job with Nomad.\n\n https://www.nomadproject.io/docs/http/jobs.html\n\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3532:c0:m9"} {"signature": "def get_self_token(self):", "body": "return self.request(\"\", \"\", method=\"\").json()", "docstring": "Retrieve self token used for auth.\n\n https://www.nomadproject.io/api/acl-tokens.html\n\n returns: dict\n\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3538:c0:m7"} {"signature": "def get_tokens(self):", "body": "return self.request(\"\", method=\"\").json()", "docstring": "Get a list of tokens.\n\n https://www.nomadproject.io/api/acl-tokens.html\n\n returns: list\n\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3538:c0:m5"} {"signature": "def get_token(self, id):", "body": "return self.request(\"\", id, method=\"\").json()", "docstring": "Retrieve specific token.\n\n https://www.nomadproject.io/api/acl-tokens.html\n\n returns: dict\n\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3538:c0:m6"} {"signature": "def create_token(self, token):", "body": "return self.request(\"\", json=token, method=\"\").json()", "docstring": "Create token.\n\n https://www.nomadproject.io/api/acl-tokens.html\n\n arguments:\n token\n returns: dict\n\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3538:c0:m8"} {"signature": "def get_allocations(self, prefix=None):", "body": "params = {\"\": prefix}return self.request(method=\"\", params=params).json()", "docstring": "Lists all the allocations.\n\n https://www.nomadproject.io/docs/http/allocs.html\n arguments:\n - prefix :(str) optional, specifies a string to filter allocations on based on an prefix.\n This is specified as a querystring parameter.\n returns: list\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3539:c0:m6"} {"signature": "def get_evaluation(self, id):", "body": "return self.request(id, method=\"\").json()", "docstring": "Query a specific evaluation.\n\n https://www.nomadproject.io/docs/http/eval.html\n\n arguments:\n - id\n returns: dict\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException", "id": "f3543:c0:m6"} {"signature": "def __init__(self, host='', secure=False, port=,address=os.getenv('', None),namespace=os.getenv('', None),token=os.getenv('', None), timeout=, region=os.getenv('', None), version='', verify=False, cert=()):", "body": "self.host = hostself.secure = secureself.port = 
portself.address = addressself.region = regionself.timeout = timeoutself.version = versionself.token = tokenself.verify = verifyself.cert = certself.__namespace = namespaceself.requester_settings = {\"\": self.address,\"\": self.get_uri(),\"\": self.port,\"\": self.__namespace,\"\": self.token,\"\": self.timeout,\"\": self.version,\"\": self.verify,\"\": self.cert,\"\": self.region}self._jobs = api.Jobs(**self.requester_settings)self._job = api.Job(**self.requester_settings)self._nodes = api.Nodes(**self.requester_settings)self._node = api.Node(**self.requester_settings)self._allocations = api.Allocations(**self.requester_settings)self._allocation = api.Allocation(**self.requester_settings)self._evaluations = api.Evaluations(**self.requester_settings)self._evaluation = api.Evaluation(**self.requester_settings)self._agent = api.Agent(**self.requester_settings)self._client = api.Client(**self.requester_settings)self._deployments = api.Deployments(**self.requester_settings)self._deployment = api.Deployment(**self.requester_settings)self._regions = api.Regions(**self.requester_settings)self._status = api.Status(**self.requester_settings)self._system = api.System(**self.requester_settings)self._operator = api.Operator(**self.requester_settings)self._validate = api.Validate(**self.requester_settings)self._namespaces = api.Namespaces(**self.requester_settings)self._namespace = api.Namespace(**self.requester_settings)self._acl = api.Acl(**self.requester_settings)self._sentinel = api.Sentinel(**self.requester_settings)self._metrics = api.Metrics(**self.requester_settings)", "docstring": "Nomad api client\n\n https://github.com/jrxFive/python-nomad/\n\n optional arguments:\n - host (defaults 127.0.0.1), string ip or name of the nomad api server/agent that will be used.\n - port (defaults 4646), integer port that will be used to connect.\n - secure (defaults False), define if the protocol is secured or not (https or http)\n - version (defaults v1), vesion of the api of nomad.\n - verify (defaults False), verify the certificate when tls/ssl is enabled\n at nomad.\n - cert (defaults empty), cert, or key and cert file to validate the certificate\n configured at nomad.\n - region (defaults None), version of the region to use. 
It will be used then\n regions of the current agent of the connection.\n - namespace (defaults to None), Specifies the enterpise namespace that will\n be use to deploy or to ask info to nomad.\n - token (defaults to None), Specifies to append ACL token to the headers to\n make authentication on secured based nomad environemnts.\n returns: Nomad api client object\n\n raises:\n - nomad.api.exceptions.BaseNomadException\n - nomad.api.exceptions.URLNotFoundNomadException\n - nomad.api.exceptions.URLNotAuthorizedNomadException", "id": "f3548:c0:m0"} {"signature": "def query_yes_quit(question, default=\"\"):", "body": "valid = {\"\": Answers.YES, \"\": Answers.YES, \"\": Answers.YES,\"\": Answers.QUIT, \"\": Answers.QUIT}if default is None:prompt = \"\"elif default == \"\":prompt = \"\"elif default == \"\":prompt = \"\"else:raise ValueError(\"\" % default)while True:sys.stdout.write(question + prompt)choice = input().lower()if default is not None and choice == '':return valid[default]elif choice in valid:return valid[choice]else:sys.stdout.write(\"\"\"\")", "docstring": "Ask a yes/quit question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits .\n It must be \"yes\" (the default), \"quit\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"quit\".\n\n Modified from\n http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input", "id": "f3561:m6"} {"signature": "def get_terminal_size():", "body": "try:try:from shutil import get_terminal_size as _get_terminal_size except ImportError:from backports.shutil_get_terminal_size import get_terminal_size as _get_terminal_size sz = _get_terminal_size()except ValueError:\"\"\"\"\"\"terminal_size = namedtuple('', '')sz = terminal_size(, )return sz", "docstring": "Returns terminal dimensions\n :return: Returns ``(width, height)``. 
If there's no terminal\n to be found, we'll just return ``(80, 24)``.", "id": "f3561:m12"} {"signature": "def query_yes_no(question, default=\"\"):", "body": "valid = {\"\": Answers.YES, \"\": Answers.YES, \"\": Answers.YES,\"\": Answers.NO, \"\": Answers.NO}if default is None:prompt = \"\"elif default == \"\":prompt = \"\"elif default == \"\":prompt = \"\"else:raise ValueError(\"\" % default)while True:sys.stdout.write(question + prompt)choice = input().lower()if default is not None and choice == '':return valid[default]elif choice in valid:return valid[choice]else:sys.stdout.write(\"\"\"\")", "docstring": "Ask a yes/no question via raw_input() and return their answer.\n\n \"question\" is a string that is presented to the user.\n \"default\" is the presumed answer if the user just hits .\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The return value is one of Answers.YES or Answers.NO.\n\n Copied (and modified) from\n http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input", "id": "f3561:m3"} {"signature": "def find_meta(*meta_file_parts, meta_key):", "body": "meta_file = read(*meta_file_parts)meta_match = re.search(r\"\".format(meta_key),meta_file, re.M)if meta_match:return meta_match.group()raise RuntimeError(\"\".format(meta_key))", "docstring": "Extract __*meta*__ from meta_file", "id": "f3566:m1"} {"signature": "def write_inp(self):", "body": "template = self.get_template()return template.substitute({\"\": self.__class__.__name__,\"\": self.label}).strip()", "docstring": "Returns the material definition as a string in Abaqus INP format.", "id": "f3628:c0:m2"} {"signature": "def nvert(self):", "body": "return self.elements.type.argiope.map(lambda t: ELEMENTS[t].nvert)", "docstring": "Returns the number of vertices of eache element according to its type/", "id": "f3648:c5:m8"} {"signature": "def write_inp(mesh, path = None, maxwidth = , sections = \"\"):", "body": "def set_to_inp(sets, keyword):ss = \"\"for sk in sets.keys():labels = sets[sk].loc[sets[sk]].index.valueslabels = list(labels)labels.sort()if len(labels)!= :ss += \"\".format(keyword, sk)ss += argiope.utils.list_to_string(labels) + \"\"return ss.strip() mesh = mesh.copy()nodes_output = (mesh.nodes.coords.to_csv(header = False).split())nodes_output = (\"\".join([\"\" + s.replace(\"\", \"\") for s in nodes_output]))if \"\" in mesh.nodes.columns.levels[]: nsets = set_to_inp(mesh.nodes.sets, \"\")else:nsets = \"\"surf_output = []if \"\" in mesh.elements.keys():sk = mesh.elements.surfaces.keys()for sindex in np.unique(sk.labels[]):slabel = sk.levels[][sindex]surface = mesh.elements.surfaces[slabel]if surface.values.sum() != :mesh.surface_to_element_sets(slabel)surf_output.append( \"\".format(slabel))for findex in surface.keys():if surface[findex].sum() != :surf_output.append(\"\".format(slabel, findex[:])) else:surf_output.append(\"\")elements_output = \"\"for etype, group in mesh.elements.groupby(((\"\", \"\", \"\"),)):els = group.conn.replace(, np.nan).to_csv(header = False, float_format='').split()elements_output += \"\".format(etype)elements_output += (\"\".join([\"\" + s.strip().strip(\"\").replace(\"\", \"\") for s in els]))elements_output += \"\"elements_output = elements_output.strip() el_sets = {} section_output = \"\"for material, group in mesh.elements.groupby(\"\"):slabel = \"\".format(material)section_output += \"\".format(material,argiope.utils.list_to_string(group.index.values))if sections == \"\":section_output += \"\".format(material)if 
\"\" in mesh.elements.columns.levels[]: esets = set_to_inp(mesh.elements.sets.swaplevel(,, axis = )[\"\"],\"\")else:esets = \"\"", "docstring": "Exports the mesh to the INP format.", "id": "f3648:m4"} {"signature": "def to_triangulation(self):", "body": "from matplotlib.tri import Triangulationconn = self.split(\"\").unstack()coords = self.nodes.coords.copy()node_map = pd.Series(data = np.arange(len(coords)), index = coords.index)conn = node_map.loc[conn.values.flatten()].values.reshape(*conn.shape)return Triangulation(coords.x.values, coords.y.values, conn)", "docstring": "Returns the mesh as a matplotlib.tri.Triangulation instance. (2D only)", "id": "f3648:c5:m18"} {"signature": "def set_elements(self, elabels = None, types = None, stypes = \"\", conn = None, esets = {}, surfaces = {}, materials = \"\",**kwargs):", "body": "if elabels is None:warnings.warn(\"\", Warning)self.elements = Noneelse: columns = pd.MultiIndex.from_tuples([(\"\", \"\", \"\")])self.elements = pd.DataFrame(data = types,columns = columns,index = elabels)self.elements.index.name = \"\"self.elements.loc[:, (\"\", \"\", \"\")] = stypesc = pd.DataFrame(conn, index = elabels)c.fillna(, inplace = True)c[:] = c.values.astype(np.int32)c.columns = pd.MultiIndex.from_product([[\"\"], [\"\".format(n) for n in np.arange(c.shape[])], [\"\"]])self.elements = self.elements.join(c)for k, v in esets.items(): self.elements[(\"\", k, \"\")] = vself.elements[\"\", \"\", \"\"] = True for k, v in surfaces.items():for fk, vv in v.items():self.elements[(\"\", k, \"\".format(fk))] = vvself.elements[(\"\", \"\", \"\") ] = materialsself.elements.sort_index(axis = , inplace = True)", "docstring": "Sets the element data.\n\n:arg elabels: element labels. Items be strictly positive and int typed \n in 1D array-like with shape :math:`(N_e)`.\n:type elabels: 1D uint typed array-like\n:arg types: element types chosen among argiope specific element types. \n:type types: str typed array-like \n:arg stypes: element types chosen in solver (depends on the chosen solver) specific element types. \n:type stypes: str typed array-like \n:arg conn: connectivity table. In order to deal with non rectangular tables, :math:`0` can be used to fill missing data. \n:type conn: uint typed array-like\n:arg esets: element sets. Contains boolean array-like of shape :math:`(N_e)`.\n:type esets: dict \n:arg surfaces: surfaces. Contains boolean array-like of shape :math:`(N_e, N_s )` with :math:`N_s` being the maximum number of faces on a single element. \n:type surfaces: dict \n:arg materials: material keys. 
Any number a of materials can be used.\n:type materials: str typed array-like", "id": "f3648:c5:m3"} {"signature": "def stats(self):", "body": "cv = self.centroids_and_volumes()angles = self.angles()edges = self.edges()return pd.concat([cv , angles[[\"\"]], edges[[\"\"]] ], axis = ).sort_index(axis = )", "docstring": "Returns mesh quality and geometric stats.", "id": "f3648:c5:m13"} {"signature": "@numba.jitdef _make_conn(shape):", "body": "shape = np.array(shape)Ne = shape.prod()if len(shape) == :nx, ny = np.array(shape) + conn = np.zeros((Ne, ), dtype = np.int32)counter = pattern = np.array([,,+nx,nx])for j in range(shape[]):for i in range(shape[]):conn[counter] = pattern + + i + j*nxcounter += if len(shape) == :nx, ny, nz = np.array(shape) + conn = np.zeros((Ne, ), dtype = np.int32)counter = pattern = np.array([,,+nx,nx,nx*ny,+nx*ny,+(nx+)*ny,(nx+)*ny])for k in range(shape[]):for j in range(shape[]):for i in range(shape[]):conn[counter] = pattern + + i + j*nx+ k*nx*nycounter += return conn", "docstring": "Connectivity builder using Numba for speed boost.", "id": "f3648:m5"} {"signature": "def node_set_to_surface(self, tag):", "body": "nodes = self.nodes.copy()dummy = nodes.iloc[].copy()dummy[\"\"] *= np.nandummy[\"\"] = Truenodes.loc[] = dummyelement_surfaces= self.split(\"\").unstack()surf = pd.DataFrame(nodes.sets[tag].loc[element_surfaces.values.flatten()].values.reshape(element_surfaces.shape).prod(axis = ).astype(np.bool),index = element_surfaces.index).unstack().fillna(False)for k in surf.keys():self.elements[\"\", tag, \"\".format(k[]+) ] = surf.loc[:, k]", "docstring": "Converts a node set to surface.", "id": "f3648:c5:m15"} {"signature": "def space(self):", "body": "return self.elements.type.argiope.map(lambda t: ELEMENTS[t].space)", "docstring": "Returns the dimension of the embedded space of each element.", "id": "f3648:c5:m7"} {"signature": "def to_polycollection(self, *args, **kwargs):", "body": "from matplotlib import collectionsnodes, elements = self.nodes, self.elements.reset_index()verts = []index = []for etype, group in elements.groupby([(\"\", \"\", \"\")]):index += list(group.index)nvert = ELEMENTS[etype].nvertconn = group.conn.values[:, :nvert].flatten()coords = nodes.coords[[\"\", \"\"]].loc[conn].values.reshape(len(group), nvert, )verts += list(coords)verts = np.array(verts)verts= verts[np.argsort(index)]return collections.PolyCollection(verts, *args,**kwargs )", "docstring": "Returns the mesh as matplotlib polygon collection. 
(tested only for 2D meshes)", "id": "f3648:c5:m17"} {"signature": "def copy(self):", "body": "return copy.deepcopy(self)", "docstring": "Returns a copy of self.", "id": "f3650:c0:m1"} {"signature": "def list_to_string(l = range(), width = , indent = \"\"):", "body": "l = [str(v) + \"\" for v in l]counter = out = \"\" + indentfor w in l:s = len(w)if counter + s > width: out += \"\" + indentcounter = out += wcounter += sreturn out.strip(\"\")", "docstring": "Converts a list-like to string with given line width.", "id": "f3650:m2"} {"signature": "def load(path):", "body": "return pickle.load(gzip.open(path))", "docstring": "Loads a file.", "id": "f3650:m0"} {"signature": "def save(self, path):", "body": "pickle.dump(self, gzip.open(path, \"\"), )", "docstring": "Saves the instance into a compressed serialized file.", "id": "f3650:c0:m0"} {"signature": "def run_simulation(self):", "body": "self.make_directories()t0 = time.time()if self.verbose: print(''.format(self.label, self.solver.upper() )) if self.solver == \"\":command = ''.format(self.solver_path, self.label) process = subprocess.Popen(command, cwd = self.workdir, shell = True, stdout = subprocess.PIPE,stderr = subprocess.STDOUT)for line in iter(process.stdout.readline, b''):line = line.rstrip().decode('')print(\"\", line)t1 = time.time()if self.verbose: print(''.format(self.label, t1 - t0))", "docstring": "Runs the simulation.", "id": "f3651:c0:m2"} {"signature": "def get_steps(odb):", "body": "return odb.steps.keys()", "docstring": "Retrieves the steps keys in an odb object", "id": "f3654:m0"} {"signature": "def _path_to_dir_key(self, path):", "body": "_path = relpath(normpath(path))_key = (forcedir(\"\".format(self._prefix, _path)).lstrip(\"\").replace(\"\", self.delimiter))return _key", "docstring": "Converts an fs path to a s3 key.", "id": "f3662:c1:m4"} {"signature": "@classmethoddef factory(cls, filename, mode, on_close):", "body": "_temp_file = tempfile.TemporaryFile()proxy = cls(_temp_file, filename, mode, on_close=on_close)return proxy", "docstring": "Create a S3File backed with a temporary file.", "id": "f3662:c0:m0"} {"signature": "def run_cmake(arg=\"\"):", "body": "if ds.find_executable('') is None:print(\"\")print(\"\")sys.exit(-)print(\"\")cmake_args = argtry:build_dir = op.join(op.split(__file__)[], '')dd.mkpath(build_dir)os.chdir(\"\")ds.spawn(['', ''] + cmake_args.split())ds.spawn(['', ''])ds.spawn([''])os.chdir(\"\")except ds.DistutilsExecError:print(\"\")print(\"\")print(\"\")sys.exit(-)", "docstring": "Forcing to run cmake", "id": "f3669:m0"} {"signature": "def delete_queue(self, name):", "body": "content = {\"\": {\"\": self.object_name},\"\": \"\",\"\": {\"\": \"\",\"\": name,\"\": dict()}} logger.debug(\"\".format(content))return content, self.method_properties", "docstring": "Create message content and properties to delete queue with QMFv2\n\n :param name: Name of queue to delete\n :type name: str\n\n :returns: Tuple containing content and method properties", "id": "f3678:c0:m4"} {"signature": "def close(self):", "body": "if self.pinger:self.pinger.cancel()self.pinger = Noneif getattr(self, '', None):self.protocol.close()", "docstring": "Close the connection", "id": "f3696:c0:m11"} {"signature": "def send_agi_command(self, channel, command, as_list=False):", "body": "action = actions.Command({'': '','': channel,'': command},as_list=as_list)return self.send_action(action)", "docstring": "Send a :class:`~panoramisk.actions.Command` to the server:\n\n :param channel: Channel name where to launch command.\n Ex: 
'SIP/000000-00000a53'\n :type channel: String\n :param command: command to launch. Ex: 'GET VARIABLE async_agi_server'\n :type command: String\n :param as_list: If True, the action Future will retrieve all responses\n :type as_list: boolean\n :return: a Future that will receive the response\n :rtype: asyncio.Future\n\n :Example:\n\n ::\n\n manager = Manager()\n resp = manager.send_agi_command('SIP/000000-00000a53',\n 'GET VARIABLE async_agi_server')\n\n\n Return a response :class:`~panoramisk.message.Message`.\n See https://wiki.asterisk.org/wiki/display/AST/Asterisk+11+ManagerAction_AGI", "id": "f3696:c0:m7"} {"signature": "def del_route(self, path):", "body": "if path not in self._route:raise ValueError('')del(self._route[path])", "docstring": "Delete a route for FastAGI requests:\n\n :param path: URI to answer. Ex: 'calls/start'\n :type path: String\n\n :Example:\n\n ::\n\n @asyncio.coroutine\n def start(request):\n print('Receive a FastAGI request')\n print(['AGI variables:', request.headers])\n\n fa_app = Application()\n fa_app.add_route('calls/start', start)\n fa_app.del_route('calls/start')", "id": "f3703:c1:m2"} {"signature": "def parse_agi_result(line):", "body": "if line == '':return {'': '','': ''}kwargs = dict(code=, response=\"\", line=line)m = re_code.search(line)try:kwargs.update(m.groupdict())except AttributeError:passreturn agi_code_check(**kwargs)", "docstring": "Parse AGI results using Regular expression.\n\n AGI Result examples::\n\n 100 result=0 Trying...\n\n 200 result=0\n\n 200 result=-1\n\n 200 result=132456\n\n 200 result= (timeout)\n\n 510 Invalid or unknown command\n\n 520-Invalid command syntax. Proper usage follows:\n int() argument must be a string, a bytes-like object or a number, not\n 'NoneType'\n\n HANGUP", "id": "f3704:m0"} {"signature": "def get_tree_item_url_name(page, with_namespace=False):", "body": "return get_model_url_name(get_app_n_model(''), page, with_namespace)", "docstring": "Returns a URL for a given Tree Item admin page type.", "id": "f3725:m2"} {"signature": "def item_move(self, request, tree_id, item_id, direction):", "body": "current_item = MODEL_TREE_ITEM_CLASS._default_manager.get(pk=item_id)if direction == '':sort_order = ''else:sort_order = ''siblings = MODEL_TREE_ITEM_CLASS._default_manager.filter(parent=current_item.parent,tree=current_item.tree).order_by(sort_order)previous_item = Nonefor item in siblings:if item != current_item:previous_item = itemelse:breakif previous_item is not None:current_item_sort_order = current_item.sort_orderprevious_item_sort_order = previous_item.sort_ordercurrent_item.sort_order = previous_item_sort_orderprevious_item.sort_order = current_item_sort_ordercurrent_item.save()previous_item.save()return HttpResponseRedirect('')", "docstring": "Moves item up or down by swapping 'sort_order' field values of neighboring items.", "id": "f3725:c0:m11"} {"signature": "def get_form(self, request, obj=None, **kwargs):", "body": "if obj is not None and obj.parent is not None:self.previous_parent = obj.parentprevious_parent_id = self.previous_parent.idelse:previous_parent_id = Nonemy_choice_field = TreeItemChoiceField(self.tree, initial=previous_parent_id)form = super(TreeItemAdmin, self).get_form(request, obj, **kwargs)my_choice_field.label = form.base_fields[''].labelmy_choice_field.help_text = form.base_fields[''].help_textmy_choice_field.widget = form.base_fields[''].widgetform.base_fields[''] = my_choice_fieldif not getattr(self, '', False):self.known_url_names = []self.known_url_rules = []resolver = 
get_resolver(get_urlconf())for ns, (url_prefix, ns_resolver) in resolver.namespace_dict.items():if ns != '':self._stack_known_urls(ns_resolver.reverse_dict, ns)self._stack_known_urls(resolver.reverse_dict)self.known_url_rules = sorted(self.known_url_rules)form.known_url_names_hint = _('''')form.known_url_names = self.known_url_namesform.known_url_rules = self.known_url_rulesreturn form", "docstring": "Returns modified form for TreeItem model.\n 'Parent' field choices are built by sitetree itself.", "id": "f3725:c0:m4"} {"signature": "def get_urls(self):", "body": "urls = super(TreeAdmin, self).get_urls()prefix_change = '' if DJANGO_POST_19 else ''sitetree_urls = [url(r'', redirects_handler, name=get_tree_item_url_name('')),url(r'' % prefix_change,self.admin_site.admin_view(self.tree_admin.item_add), name=get_tree_item_url_name('')),url(r'' % prefix_change,self.admin_site.admin_view(self.tree_admin.item_edit), name=get_tree_item_url_name('')),url(r'' % prefix_change,self.admin_site.admin_view(self.tree_admin.item_edit), name=get_tree_item_url_name('')),url(r'' % prefix_change,self.admin_site.admin_view(self.tree_admin.item_delete), name=get_tree_item_url_name('')),url(r'' % prefix_change,self.admin_site.admin_view(self.tree_admin.item_history), name=get_tree_item_url_name('')),url(r'' % prefix_change,self.admin_site.admin_view(self.tree_admin.item_move), name=get_tree_item_url_name('')),]if not DJANGO_POST_19:sitetree_urls = patterns_func('', *sitetree_urls)if SMUGGLER_INSTALLED:sitetree_urls += (url(r'', self.admin_site.admin_view(self.dump_view), name=''),)return sitetree_urls + urls", "docstring": "Manages not only TreeAdmin URLs but also TreeItemAdmin URLs.", "id": "f3725:c1:m2"} {"signature": "def response_add(self, request, obj, post_url_continue=None, **kwargs):", "body": "if post_url_continue is None:post_url_continue = '' % obj.pkreturn self._redirect(request, super(TreeItemAdmin, self).response_add(request, obj, post_url_continue))", "docstring": "Redirects to the appropriate items' 'continue' page on item add.\n\n As we administer tree items within tree itself, we\n should make some changes to redirection process.", "id": "f3725:c0:m2"} {"signature": "def _redirect(self, request, response):", "body": "if '' in request.POST:return HttpResponseRedirect('')elif '' in request.POST:return HttpResponseRedirect('')elif '' in request.POST:return responsereturn HttpResponseRedirect('')", "docstring": "Generic redirect for item editor.", "id": "f3725:c0:m1"} {"signature": "def get_tree_url_name(page, with_namespace=False):", "body": "return get_model_url_name(get_app_n_model(''), page, with_namespace)", "docstring": "Returns a URL for a given Tree admin page type.", "id": "f3725:m1"} {"signature": "@classmethoddef get_as_var(cls, tokens):", "body": "as_var = Noneif tokens[-] == '':as_var = tokens[-]tokens[-:] = []return as_var", "docstring": "Returns context variable from `as` template tag clause if any.\n\n Modifies tokens inplace.\n\n :param tokens:\n :rtype: None|str", "id": "f3739:c4:m1"} {"signature": "@register.tagdef sitetree_page_description(parser, token):", "body": "return sitetree_page_descriptionNode.for_tag(parser, token, '', '')", "docstring": "Renders a description for the current page, resolved against sitetree item representing current URL.", "id": "f3739:m6"} {"signature": "def render(context, tree_items, use_template):", "body": "context.push()context[''] = tree_itemsif isinstance(use_template, FilterExpression):use_template = use_template.resolve(context)content = 
get_template(use_template).render(context.flatten() if _CONTEXT_FLATTEN else context)context.pop()return content", "docstring": "Render helper is used by template node functions\n to render given template with given tree items in context.", "id": "f3739:m9"} {"signature": "@classmethoddef for_tag(cls, parser, token, preposition, error_hint):", "body": "tokens = token.split_contents()if len(tokens) >= and tokens[] == preposition:as_var = cls.get_as_var(tokens)tree_alias = parser.compile_filter(tokens[])return cls(tree_alias, as_var)raise template.TemplateSyntaxError('' % (tokens[], error_hint))", "docstring": "Node constructor to be used in tags.", "id": "f3739:c4:m0"} {"signature": "def get_value(self, context):", "body": "", "docstring": "Should return a computed value to be used in render().", "id": "f3739:c4:m3"} {"signature": "def detect_clause(parser, clause_name, tokens):", "body": "if clause_name in tokens:t_index = tokens.index(clause_name)clause_value = parser.compile_filter(tokens[t_index + ])del tokens[t_index:t_index + ]else:clause_value = Nonereturn clause_value", "docstring": "Helper function detects a certain clause in tag tokens list.\n Returns its value.", "id": "f3739:m8"} {"signature": "def options_getter(command_options):", "body": "def get_options(option_func=None):from optparse import make_optionfrom django.core.management.base import BaseCommandfunc = option_func or make_optionoptions = tuple([func(*option.args, **option.kwargs) for option in command_options])if option_func is None:if VERSION < (, ):result = BaseCommand.option_list + optionselse:result = []else:result = optionsreturn resultreturn get_options", "docstring": "Compatibility function to get rid of optparse in management commands after Django 1.10.\n\n :param tuple command_options: tuple with `CommandOption` objects.", "id": "f3741:m0"} {"signature": "def tree_climber(self, tree_alias, base_item):", "body": "if base_item is not None:base_item.in_current_branch = Trueif hasattr(base_item, '') and base_item.parent is not None:self.tree_climber(tree_alias, self.get_item_by_id(tree_alias, base_item.parent.id))", "docstring": "Climbs up the site tree to mark items of current branch.\n\n :param str|unicode tree_alias:\n :param TreeItemBase base_item:", "id": "f3745:c2:m24"} {"signature": "def get_sitetree():", "body": "sitetree = getattr(_THREAD_LOCAL, _THREAD_SITETREE, None)if sitetree is None:sitetree = SiteTree()setattr(_THREAD_LOCAL, _THREAD_SITETREE, sitetree)return sitetree", "docstring": "Returns SiteTree (thread-singleton) object, implementing utility methods.\n\n :rtype: SiteTree", "id": "f3745:m0"} {"signature": "def tree(self, tree_alias, context):", "body": "tree_alias, sitetree_items = self.init_tree(tree_alias, context)if not sitetree_items:return ''tree_items = self.filter_items(self.get_children(tree_alias, None), '')tree_items = self.apply_hook(tree_items, '')self.update_has_children(tree_alias, tree_items, '')return tree_items", "docstring": "Builds and returns tree structure for 'sitetree_tree' tag.\n\n :param str|unicode tree_alias:\n :param Context context:\n :rtype: list|str", "id": "f3745:c2:m18"} {"signature": "def filter_items(self, items, navigation_type=None):", "body": "if self.current_app_is_admin():return itemsitems_filtered = []context = self.current_page_contextcheck_access = self.check_accessfor item in items:if item.hidden:continueif not check_access(item, context):continueif not getattr(item, '' % navigation_type, True): continueitems_filtered.append(item)return 
items_filtered", "docstring": "Filters sitetree item's children if hidden and by navigation type.\n\n NB: We do not apply any filters to sitetree in admin app.\n\n :param list items:\n :param str|unicode navigation_type: sitetree, breadcrumbs, menu\n :rtype: list", "id": "f3745:c2:m22"} {"signature": "def set_entry(self, entry_name, key, value):", "body": "self.cache[entry_name][key] = value", "docstring": "Replaces entire cache entry parameter data by its name with new data.\n\n :param str|unicode entry_name:\n :param key:\n :param value:", "id": "f3745:c1:m7"} {"signature": "def get_sitetree(self, alias):", "body": "cache_ = self.cacheget_cache_entry = cache_.get_entryset_cache_entry = cache_.set_entrycaching_required = Falseif not self.current_app_is_admin():alias = self.resolve_tree_i18n_alias(alias)sitetree = get_cache_entry('', alias)if not sitetree:if DYNAMIC_ONLY:sitetree = []else:sitetree = (MODEL_TREE_ITEM_CLASS.objects.select_related('', '').prefetch_related('').filter(tree__alias__exact=alias).order_by('', ''))sitetree = self.attach_dynamic_tree_items(alias, sitetree)set_cache_entry('', alias, sitetree)caching_required = Trueparents = get_cache_entry('', alias)if not parents:parents = defaultdict(list)for item in sitetree:parent = getattr(item, '')parents[parent].append(item)set_cache_entry('', alias, parents)if caching_required:cache_update = cache_.update_entry_valuefor item in sitetree:cache_update('', alias, {item.id: item})url = self.urlcalculate_item_depth = self.calculate_item_depthfor item in sitetree:if caching_required:item.has_children = Falseif not hasattr(item, ''):item.depth = calculate_item_depth(alias, item.id)item.depth_range = range(item.depth)if item.access_restricted:permissions_src = (item.permissions if getattr(item, '', False)else item.access_permissions.all())item.perms = set(['' % (perm.content_type.app_label, perm.codename) for perm in permissions_src])item.url_resolved = url(item)item.title_resolved = LazyTitle(item.title) if VARIABLE_TAG_START in item.title else item.titleitem.is_current = Falseitem.in_current_branch = Falseself.get_tree_current_item(alias)if caching_required:cache_.save()return alias, sitetree", "docstring": "Gets site tree items from the given site tree.\n Caches result to dictionary.\n Returns (tree alias, tree items) tuple.\n\n :param str|unicode alias:\n :rtype: tuple", "id": "f3745:c2:m5"} {"signature": "def get_current_page_attr(self, attr_name, tree_alias, context):", "body": "tree_alias, sitetree_items = self.init_tree(tree_alias, context)current_item = self.get_tree_current_item(tree_alias)if current_item is None:if settings.DEBUG and RAISE_ITEMS_ERRORS_ON_DEBUG:raise SiteTreeError('''' % attr_name)return ''return getattr(current_item, attr_name, '')", "docstring": "Returns an arbitrary attribute of a sitetree item resolved as current for current page.\n\n :param str|unicode attr_name:\n :param str|unicode tree_alias:\n :param Context context:\n :rtype: str|unicode", "id": "f3745:c2:m12"} {"signature": "def calculate_item_depth(self, tree_alias, item_id, depth=):", "body": "item = self.get_item_by_id(tree_alias, item_id)if hasattr(item, ''):depth = item.depth + depthelse:if item.parent is not None:depth = self.calculate_item_depth(tree_alias, item.parent.id, depth + )return depth", "docstring": "Calculates depth of the item in the tree.\n\n :param str|unicode tree_alias:\n :param int item_id:\n :param int depth:\n :rtype: int", "id": "f3745:c2:m6"} {"signature": "def compose_dynamic_tree(src, target_tree_alias=None, 
parent_tree_item_alias=None, include_trees=None):", "body": "def result(sitetrees=src):if include_trees is not None:sitetrees = [tree for tree in sitetrees if tree.alias in include_trees]return {'': src,'': sitetrees,'': target_tree_alias,'': parent_tree_item_alias}if isinstance(src, six.string_types):try:module = import_app_sitetree_module(src)return None if module is None else result(getattr(module, '', None))except ImportError as e:if settings.DEBUG:warnings.warn('' % (src, e))return Nonereturn result()", "docstring": "Returns a structure describing a dynamic sitetree.utils\n The structure can be built from various sources,\n\n :param str|iterable src: If a string is passed to `src`, it'll be treated as the name of an app,\n from where one want to import sitetrees definitions. `src` can be an iterable\n of tree definitions (see `sitetree.toolbox.tree()` and `item()` functions).\n\n :param str|unicode target_tree_alias: Static tree alias to attach items from dynamic trees to.\n\n :param str|unicode parent_tree_item_alias: Tree item alias from a static tree to attach items from dynamic trees to.\n\n :param list include_trees: Sitetree aliases to filter `src`.\n\n :rtype: dict", "id": "f3745:m5"} {"signature": "def register_items_hook(func):", "body": "global _ITEMS_PROCESSORglobal _ITEMS_PROCESSOR_ARGS_LEN_ITEMS_PROCESSOR = funcif func:args_len = len(getargspec(func).args)if args_len not in {, }:raise SiteTreeError('')_ITEMS_PROCESSOR_ARGS_LEN = args_len", "docstring": "Registers a hook callable to process tree items right before they are passed to templates.\n\n Callable should be able to:\n\n a) handle ``tree_items`` and ``tree_sender`` key params.\n ``tree_items`` will contain a list of extended TreeItem objects ready to pass to template.\n ``tree_sender`` will contain navigation type identifier\n (e.g.: `menu`, `sitetree`, `breadcrumbs`, `menu.children`, `sitetree.children`)\n\n b) return a list of extended TreeItems objects to pass to template.\n\n\n Example::\n\n # Put the following code somewhere where it'd be triggered as expected. E.g. 
in app view.py.\n\n # First import the register function.\n from sitetree.sitetreeapp import register_items_hook\n\n # The following function will be used as items processor.\n def my_items_processor(tree_items, tree_sender):\n # Suppose we want to process only menu child items.\n if tree_sender == 'menu.children':\n # Lets add 'Hooked: ' to resolved titles of every item.\n for item in tree_items:\n item.title_resolved = 'Hooked: %s' % item.title_resolved\n # Return items list mutated or not.\n return tree_items\n\n # And we register items processor.\n register_items_hook(my_items_processor)\n\n :param func:", "id": "f3745:m1"} {"signature": "def current_app_is_admin(self):", "body": "is_admin = self._current_app_is_adminif is_admin is None:context = self.current_page_contextcurrent_app = getattr(getattr(context.get('', None), '', None), '',getattr(context, '', None))if current_app is None: current_app = context.get('', '')is_admin = current_app == ADMIN_APP_NAMEself._current_app_is_admin = is_adminreturn is_admin", "docstring": "Returns boolean whether current application is Admin contrib.\n\n :rtype: bool", "id": "f3745:c2:m4"} {"signature": "def get_tree_current_item(self, tree_alias):", "body": "current_item = self._current_items.get(tree_alias, _UNSET)if current_item is not _UNSET:if current_item is not None:current_item.is_current = True return current_itemcurrent_item = Noneif self.current_app_is_admin():self._current_items[tree_alias] = current_itemreturn Nonecurrent_url = self.current_request.pathif isinstance(current_url, str):current_url = current_url.encode('')if current_url:current_url = urlquote(current_url)for url_item, url in self._items_urls.items():if url != current_url:continueurl_item.is_current = Trueif url_item.tree.alias == tree_alias:current_item = url_itemif current_item is not None:self._current_items[tree_alias] = current_itemreturn current_item", "docstring": "Resolves current tree item of 'tree_alias' tree matching current\n request path against URL of given tree item.\n\n :param str|unicode tree_alias:\n :rtype: TreeItemBase", "id": "f3745:c2:m8"} {"signature": "def breadcrumbs(self, tree_alias, context):", "body": "tree_alias, sitetree_items = self.init_tree(tree_alias, context)if not sitetree_items:return ''current_item = self.get_tree_current_item(tree_alias)breadcrumbs = []if current_item is not None:context_ = self.current_page_contextcheck_access = self.check_accessget_item_by_id = self.get_item_by_iddef climb(base_item):\"\"\"\"\"\"if base_item.inbreadcrumbs and not base_item.hidden and check_access(base_item, context_):breadcrumbs.append(base_item)if hasattr(base_item, '') and base_item.parent is not None:climb(get_item_by_id(tree_alias, base_item.parent.id))climb(current_item)breadcrumbs.reverse()items = self.apply_hook(breadcrumbs, '')self.update_has_children(tree_alias, items, '')return items", "docstring": "Builds and returns breadcrumb trail structure for 'sitetree_breadcrumbs' tag.\n\n :param str|unicode tree_alias:\n :param Context context:\n :rtype: list|str", "id": "f3745:c2:m17"} {"signature": "@classmethoddef reset(cls):", "body": "cache.set('', True)", "docstring": "Instructs sitetree to drop and recreate cache.\n\n Could be used to show up tree changes made in a different process.", "id": "f3745:c1:m1"} {"signature": "def get_dynamic_trees():", "body": "return _DYNAMIC_TREES", "docstring": "Returns a dictionary with currently registered dynamic trees.", "id": "f3745:m4"} {"signature": "def get_tree_model():", "body": "return 
get_model_class('')", "docstring": "Returns the Tree model, set for the project.\n\n :rtype: TreeBase", "id": "f3750:m7"} {"signature": "def save(self, force_insert=False, force_update=False, **kwargs):", "body": "if self.parent == self:self.parent = Noneid_ = self.idif id_ and self.sort_order == :self.sort_order = id_super(TreeItemBase, self).save(force_insert, force_update, **kwargs)if self.sort_order == :self.sort_order = self.idself.save()", "docstring": "Ensure that item is not its own parent and set proper sort order.", "id": "f3751:c2:m0"} {"signature": "def twisted_app(port):", "body": "from twisted.web import serverfrom twisted.web.resource import Resourcefrom twisted.web.static import Filefrom web.apps.twisted_app import TwistedApplicationclass Main(Resource):def getChild(self, name, request):name = name.decode()if name == '':return selfreturn self.children[name]def render_GET(self, request):return HELLO_WORLD.encode()class Landing(Resource):isLeaf = Truedef render_GET(self, request):return LANDING_PAGE.encode()root = Main()root.putChild('', Landing())root.putChild('', File(STATIC_PATH))site = server.Site(root)app = TwistedApplication(port=port, site=site)app.timed_call(, app.stop)app.start()", "docstring": "With logging\n\n Running 30s test @ http://127.0.0.1:8888/\n 12 threads and 400 connections\n Thread Stats Avg Stdev Max +/- Stdev\n Latency 124.17ms 24.74ms 492.40ms 85.64%\n Req/Sec 245.94 80.01 0.86k 66.42%\n 87585 requests in 30.05s, 11.78MB read\n Requests/sec: 2914.22\n Transfer/sec: 401.27KB", "id": "f3757:m5"} {"signature": "def tornado_app(port):", "body": "import tornado.webfrom web.apps.tornado_app import TornadoApplicationfrom tornado.log import enable_pretty_loggingenable_pretty_logging()app = TornadoApplication()class Home(Handler):def get(self, req, resp):resp.write(HELLO_WORLD)resp.finish()class Landing(Handler):def get(self, req, resp):resp.write(LANDING_PAGE)resp.finish()app.add_route('', Home())app.add_route('', Landing())app.add_static_route('', STATIC_PATH)app.timed_call(, app.stop)app.start(port=port)", "docstring": "Even without logging it's slower!\n\n Running 30s test @ http://127.0.0.1:8888/\n 12 threads and 400 connections\n Thread Stats Avg Stdev Max +/- Stdev\n Latency 179.14ms 26.19ms 464.63ms 92.60%\n Req/Sec 184.55 107.47 560.00 57.59%\n 64871 requests in 30.10s, 12.87MB read\n Requests/sec: 2155.42\n Transfer/sec: 437.82KB\n\n with logging\n\n Running 30s test @ http://127.0.0.1:8888/\n 12 threads and 400 connections\n Thread Stats Avg Stdev Max +/- Stdev\n Latency 209.77ms 28.48ms 320.47ms 91.43%\n Req/Sec 156.14 79.60 500.00 63.72%\n 55415 requests in 30.10s, 10.99MB read\n Requests/sec: 1841.04\n Transfer/sec: 373.96KB", "id": "f3757:m4"} {"signature": "def falcon_app(port):", "body": "from web.apps.falcon_app import FalconApplicationapp = FalconApplication()class Home(Handler):def get(self, req, resp):resp.body = HELLO_WORLDclass Landing(Handler):def get(self, req, resp):resp.body = LANDING_PAGEapp.add_route('', Home())app.add_route('', Landing())app.add_static_route('', STATIC_PATH)app.start(port=port)", "docstring": "With logging\n\n Running 30s test @ http://127.0.0.1:8888/\n 12 threads and 400 connections\n Thread Stats Avg Stdev Max +/- Stdev\n Latency 62.65ms 10.04ms 189.96ms 87.87%\n Req/Sec 526.93 159.98 1.00k 64.25%\n 188860 requests in 30.06s, 23.41MB read\n Requests/sec: 6283.35\n Transfer/sec: 797.69KB", "id": "f3757:m2"} {"signature": "def on_message(self, message):", "body": "change = json.loads(message)log.debug(f'')ref = 
change.get('')if not ref:returnnodes = self.viewer.xpath('', ref=ref)if not nodes:return node = nodes[]if change.get('') and change.get(''):if change[''] == '':trigger = getattr(node, change[''])trigger()elif change[''] == '':setattr(node, change[''], change[''])else:log.warning(f\"\")", "docstring": "When we get an event from js, lookup the node and invoke the\n action on the enaml node.", "id": "f3762:c1:m1"} {"signature": "@observe('','','')def _update_menus(self,change):", "body": "menus = {}links = [p.link for p in self.pages if p.link] + self.links for link in links:for menu in link.menus:if menu not in menus:menus[menu] = []menus[menu].append(link)for name,menu in menus.items():k = ''.format(name)if hasattr(self,k):setattr(self,k,menu)", "docstring": "When pages change, update the menus", "id": "f3767:c0:m0"} {"signature": "def prepare(self):", "body": "if self.__class__.view:returnwith enaml.imports():View = pydoc.locate(self.page.view)assert View, \"\".format(self.page.view)self.__class__.view = View(site=self.site,page=self.page,request=self.request,)", "docstring": "Load the view on first load", "id": "f3769:c0:m0"} {"signature": "def prepare(self, **kwargs):", "body": "for k, v in kwargs.items():setattr(self, k, v)if not self.is_initialized:self.initialize()if not self.proxy_is_active:self.activate_proxy()", "docstring": "Prepare for rendering", "id": "f3777:c1:m7"} {"signature": "def xpath(self, query, **kwargs):", "body": "nodes = self.proxy.find(query, **kwargs)return [n.declaration for n in nodes]", "docstring": "Find nodes matching the given xpath query", "id": "f3777:c1:m6"} {"signature": "@observe('', '')def _update_proxy(self, change):", "body": "super(Code, self)._update_proxy(change)", "docstring": "The superclass implementation is sufficient.", "id": "f3779:c1:m0"} {"signature": "def render(self):", "body": "return tostring(self.widget, pretty_print=True,encoding='', method='')", "docstring": "Render the widget tree into a string", "id": "f3784:c0:m9"} {"signature": "def child_added(self, child):", "body": "super(WebComponent, self).child_added(child)if child.widget is not None:for i, c in enumerate(self.children()):if c == child:self.widget.insert(i, child.widget)break", "docstring": "Handle the child added event from the declaration.\n\n This handler will insert the child toolkit widget in the correct.\n position. Subclasses which need more control should reimplement this\n method.", "id": "f3784:c0:m7"} {"signature": "def set_draggable(self, draggable):", "body": "self.widget.set('', '' if draggable else '')", "docstring": "The draggable attr must be explicitly set to true or false", "id": "f3784:c0:m20"} {"signature": "def child_removed(self, child):", "body": "super(WebComponent, self).child_removed(child)if child.widget is not None:for i, c in enumerate(self.children()):if c == child:del self.widget[i]break", "docstring": "Handle the child removed event from the declaration.\n\n This handler will unparent the child toolkit widget. Subclasses\n which need more control should reimplement this method.", "id": "f3784:c0:m8"} {"signature": "def init_events(self):", "body": "pass", "docstring": "Initialize the event handlers of the toolkit widget.\n\n This method is called during the bottom-up pass. This method\n should initialize the event handlers for the widget. 
The child widgets\n will be fully initialized and laid out when this is called.", "id": "f3784:c0:m3"} {"signature": "def init_layout(self):", "body": "pass", "docstring": "Initialize the layout of the toolkit widget.\n\n This method is called during the bottom-up pass. This method\n should initialize the layout of the widget. The child widgets\n will be fully initialized and laid out when this is called.", "id": "f3784:c0:m2"} {"signature": "def assertImageEqual(self, image1, image2):", "body": "h1 = image1.histogram()h2 = image2.histogram()rms = math.sqrt(reduce(operator.add,map(lambda a, b: (a - b) ** , h1, h2)) // len(h1))self.assertEqual(rms, )", "docstring": "Assert that `image1` & `image2` are identical images.", "id": "f3798:c1:m2"} {"signature": "def get_filtered_root_folder(self):", "body": "folder, filename = os.path.split(self.name)return os.path.join(folder, VERSATILEIMAGEFIELD_FILTERED_DIRNAME, '')", "docstring": "Return the location where filtered images are stored.", "id": "f3800:c0:m7"} {"signature": "@propertydef ppoi(self):", "body": "return self._ppoi_value", "docstring": "Primary Point of Interest (ppoi) getter.", "id": "f3800:c0:m4"} {"signature": "def get_filtered_sized_root_folder(self):", "body": "sized_root_folder = self.get_sized_root_folder()return os.path.join(sized_root_folder,VERSATILEIMAGEFIELD_FILTERED_DIRNAME)", "docstring": "Return the location where filtered + sized images are stored.", "id": "f3800:c0:m9"} {"signature": "def delete_all_created_images(self):", "body": "self.delete_filtered_images()self.delete_sized_images()self.delete_filtered_sized_images()", "docstring": "Delete all images created from `self.name`.", "id": "f3800:c0:m14"} {"signature": "def __init__(self, instance_or_queryset,rendition_key_set, image_attr, verbose=False):", "body": "if isinstance(instance_or_queryset, Model):queryset = instance_or_queryset.__class__._default_manager.filter(pk=instance_or_queryset.pk)elif isinstance(instance_or_queryset, QuerySet):queryset = instance_or_querysetelse:raise ValueError(\"\"\"\".format(self.__class__.__name__))self.queryset = querysetif isinstance(rendition_key_set, six.string_types):rendition_key_set = get_rendition_key_set(rendition_key_set)self.size_key_list = [size_keyfor key, size_key in validate_versatileimagefield_sizekey_list(rendition_key_set)]self.image_attr = image_attrself.verbose = verbose", "docstring": "Arguments:\n`instance_or_queryset`: A django model instance or QuerySet\n`rendition_key_set`: Either a string that corresponds to a key on\n settings.VERSATILEIMAGEFIELD_RENDITION_KEY_SETS\n or an iterable\n of 2-tuples, both strings:\n [0]: The 'name' of the image size.\n [1]: A VersatileImageField 'size_key'.\n Example: [\n ('large', 'url'),\n ('medium', 'crop__400x400'),\n ('small', 'thumbnail__100x100')\n ]\n`image_attr`: A dot-notated path to a VersatileImageField on\n `instance_or_queryset`\n`verbose`: bool signifying whether a progress bar should be printed\n to sys.stdout", "id": "f3804:c0:m0"} {"signature": "def warm(self):", "body": "num_images_pre_warmed = failed_to_create_image_path_list = []total = self.queryset.count() * len(self.size_key_list)for a, instance in enumerate(self.queryset, start=):for b, size_key in enumerate(self.size_key_list, start=):success, url_or_filepath = self._prewarm_versatileimagefield(size_key,reduce(getattr, self.image_attr.split(\"\"), instance))if success is True:num_images_pre_warmed += if self.verbose:cli_progress_bar(num_images_pre_warmed, 
total)else:failed_to_create_image_path_list.append(url_or_filepath)if a * b == total:stdout.write('')stdout.flush()return (num_images_pre_warmed, failed_to_create_image_path_list)", "docstring": "Returns a 2-tuple:\n[0]: Number of images successfully pre-warmed\n[1]: A list of paths on the storage class associated with the\n VersatileImageField field being processed by `self` of\n files that could not be successfully seeded.", "id": "f3804:c0:m2"} {"signature": "def md5(image_key):", "body": "return hashlib.md5(image_key.encode('')).hexdigest()", "docstring": "Return the md5 hash of image_key.", "id": "f3806:m0"} {"signature": "def md5_16(image_key):", "body": "return md5(image_key)[:]", "docstring": "Return the first 16 characters of the md5 hash of image_key.", "id": "f3806:m1"} {"signature": "def process_placeholder_image(self):", "body": "if self.placeholder_image_name:returnplaceholder_image_name = Noneplaceholder_image = self.placeholder_imageif placeholder_image:if isinstance(placeholder_image, OnStoragePlaceholderImage):name = placeholder_image.pathelse:name = placeholder_image.image_data.nameplaceholder_image_name = os.path.join(VERSATILEIMAGEFIELD_PLACEHOLDER_DIRNAME, name)if not self.storage.exists(placeholder_image_name):self.storage.save(placeholder_image_name,placeholder_image.image_data)self.placeholder_image_name = placeholder_image_name", "docstring": "Process the field's placeholder image.\n\nEnsures the placeholder image has been saved to the same storage class\nas the field in a top level folder with a name specified by\nsettings.VERSATILEIMAGEFIELD_SETTINGS['placeholder_directory_name']\n\nThis should be called by the VersatileImageFileDescriptor __get__.\nIf self.placeholder_image_name is already set it just returns right away.", "id": "f3809:c1:m1"} {"signature": "def __init__(self, path):", "body": "self.path = path", "docstring": "`path` - An absolute path to an on-disc image.", "id": "f3811:c1:m0"} {"signature": "def get_ppoi_id(self, name):", "body": "return name + ''", "docstring": "Given the name of the primary point of interest tag, return the HTML id for it.", "id": "f3813:c0:m2"} {"signature": "def get_context(self, name, value, attrs):", "body": "if self.has_template_widget_rendering:context = super(ClearableFileInputWithImagePreview, self).get_context(name, value, attrs)else:context = {}context[''] = {'': name,'': self.is_hidden,'': self.is_required,'': self._format_value(value),'': self.build_attrs(self.attrs, attrs),'': self.template_name,'': self.input_type,}checkbox_name = self.clear_checkbox_name(name)checkbox_id = self.clear_checkbox_id(checkbox_name)context[''].update({'': checkbox_name,'': checkbox_id,'': self.is_initial(value),'': self.input_text,'': self.initial_text,'': self.clear_checkbox_label,})if value and hasattr(value, \"\"):context[''].update({'': self.get_hidden_field_id(name),'': self.get_point_stage_id(name),'': self.get_ppoi_id(name),'': self.get_sized_url(value),'': self.image_preview_id(name),})return context", "docstring": "Get the context to render this widget with.", "id": "f3813:c0:m6"} {"signature": "def build_attrs(self, base_attrs, extra_attrs=None):", "body": "attrs = base_attrs.copy()if extra_attrs is not None:attrs.update(extra_attrs)return attrs", "docstring": "Build an attribute dictionary.", "id": "f3813:c0:m7"} {"signature": "def register_sizer(self, attr_name, sizedimage_cls):", "body": "if attr_name.startswith('') or attr_name in self.unallowed_sizer_names:raise UnallowedSizerName(\"\"\"\"\"\" % 
(attr_name,''.join([namefor name in self.unallowed_sizer_names])))if not issubclass(sizedimage_cls, SizedImage):raise InvalidSizedImageSubclass('''')if attr_name in self._sizedimage_registry:raise AlreadyRegistered('''''' % attr_name)else:self._sizedimage_registry[attr_name] = sizedimage_cls", "docstring": "Register a new SizedImage subclass (`sizedimage_cls`).\n\nTo be used via the attribute (`attr_name`).", "id": "f3814:c6:m1"} {"signature": "def unregister_sizer(self, attr_name):", "body": "if attr_name not in self._sizedimage_registry:raise NotRegistered('' % attr_name)else:del self._sizedimage_registry[attr_name]", "docstring": "Unregister the SizedImage subclass currently assigned to `attr_name`.\n\nIf a SizedImage subclass isn't already registered to `attr_name`,\nNotRegistered will be raised.", "id": "f3814:c6:m2"} {"signature": "def autodiscover():", "body": "from importlib import import_modulefrom django.apps import appsfrom django.utils.module_loading import module_has_submodulefor app_config in apps.get_app_configs():try:before_import_sizedimage_registry = copy.copy(versatileimagefield_registry._sizedimage_registry)before_import_filter_registry = copy.copy(versatileimagefield_registry._filter_registry)import_module('' % app_config.name)except Exception:versatileimagefield_registry._sizedimage_registry =before_import_sizedimage_registryversatileimagefield_registry._filter_registry =before_import_filter_registryif module_has_submodule(app_config.module, ''):raise", "docstring": "Discover versatileimagefield.py modules.\n\nIterate over django.apps.get_app_configs() and discover\nversatileimagefield.py modules.", "id": "f3814:m0"} {"signature": "def unregister_filter(self, attr_name):", "body": "if attr_name not in self._filter_registry:raise NotRegistered('' % attr_name)else:del self._filter_registry[attr_name]", "docstring": "Unregister the FilteredImage subclass currently assigned to attr_name.\n\nIf a FilteredImage subclass isn't already registered to filters.`attr_name`,\nNotRegistered will be raised.", "id": "f3814:c6:m4"} {"signature": "def get_filtered_filename(filename, filename_key):", "body": "try:image_name, ext = filename.rsplit('', )except ValueError:image_name = filenameext = ''return \"\" % ({'': image_name,'': filename_key,'': ext})", "docstring": "Return the 'filtered filename' (according to `filename_key`)\nin the following format:\n`filename`__`filename_key`__.ext", "id": "f3815:m3"} {"signature": "def build_versatileimagefield_url_set(image_instance, size_set, request=None):", "body": "size_set = validate_versatileimagefield_sizekey_list(size_set)to_return = {}if image_instance or image_instance.field.placeholder_image:for key, image_key in size_set:img_url = get_url_from_image_key(image_instance, image_key)if request is not None:img_url = request.build_absolute_uri(img_url)to_return[key] = img_urlreturn to_return", "docstring": "Return a dictionary of urls corresponding to size_set\n- `image_instance`: A VersatileImageFieldFile\n- `size_set`: An iterable of 2-tuples, both strings. 
Example:\n [\n ('large', 'url'),\n ('medium', 'crop__400x400'),\n ('small', 'thumbnail__100x100')\n ]\n\n The above would lead to the following response:\n {\n 'large': 'http://some.url/image.jpg',\n 'medium': 'http://some.url/__sized__/image-crop-400x400.jpg',\n 'small': 'http://some.url/__sized__/image-thumbnail-100x100.jpg',\n }\n- `request`:", "id": "f3815:m8"} {"signature": "def get_resized_path(path_to_image, width, height,filename_key, storage):", "body": "containing_folder, filename = os.path.split(path_to_image)resized_filename = get_resized_filename(filename,width,height,filename_key)joined_path = os.path.join(*[VERSATILEIMAGEFIELD_SIZED_DIRNAME,containing_folder,resized_filename]).replace('', '') return joined_path", "docstring": "Return a `path_to_image` location on `storage` as dictated by `width`, `height`\nand `filename_key`", "id": "f3815:m2"} {"signature": "def get_filtered_path(path_to_image, filename_key, storage):", "body": "containing_folder, filename = os.path.split(path_to_image)filtered_filename = get_filtered_filename(filename, filename_key)path_to_return = os.path.join(*[containing_folder,VERSATILEIMAGEFIELD_FILTERED_DIRNAME,filtered_filename])path_to_return = path_to_return.replace('', '')return path_to_return", "docstring": "Return the 'filtered path'", "id": "f3815:m4"} {"signature": "def process_image(self, image, image_format, **kwargs):", "body": "raise NotImplementedError('')", "docstring": "Ensure NotImplemented is raised if not overloaded by subclasses.\n\nArguments:\n * `image`: a PIL Image instance\n * `image_format`: str, a valid PIL format (i.e. 'JPEG' or 'GIF')\n\nReturns a BytesIO representation of the resized image.\n\nSubclasses MUST implement this method.", "id": "f3817:c0:m1"} {"signature": "def create_filtered_image(self, path_to_image, save_path_on_storage):", "body": "image, file_ext, image_format, mime_type = self.retrieve_image(path_to_image)image, save_kwargs = self.preprocess(image, image_format)imagefile = self.process_image(image, image_format, save_kwargs)self.save_image(imagefile, save_path_on_storage, file_ext, mime_type)", "docstring": "Creates a filtered image.\n`path_to_image`: The path to the image with the media directory\n to resize.\n`save_path_on_storage`: Where on self.storage to save the filtered\n image", "id": "f3819:c1:m1"} {"signature": "def __getitem__(self, key):", "body": "try:width, height = [int(i) for i in key.split('')]except (KeyError, ValueError):raise MalformedSizedImageKey(\"\"\"\"\"\" % self.__class__.__name__)if not self.path_to_image and getattr(settings, '', False):resized_url = \"\" % (width, height)resized_storage_path = resized_urlelse:resized_storage_path = get_resized_path(path_to_image=self.path_to_image,width=width,height=height,filename_key=self.get_filename_key(),storage=self.storage)try:resized_url = self.storage.url(resized_storage_path)except Exception:resized_url = Noneif self.create_on_demand is True:if cache.get(resized_url) and resized_url is not None:passelse:if resized_storage_path and not self.storage.exists(resized_storage_path):self.create_resized_image(path_to_image=self.path_to_image,save_path_on_storage=resized_storage_path,width=width,height=height)resized_url = self.storage.url(resized_storage_path)cache.set(resized_url, , VERSATILEIMAGEFIELD_CACHE_LENGTH)return SizedImageInstance(name=resized_storage_path,url=resized_url,storage=self.storage)", "docstring": "Return a URL to an image sized according to key.\n\nArguments:\n * `key`: A string in the following format\n 
'[width-in-pixels]x[height-in-pixels]'\n Example: '400x400'", "id": "f3820:c2:m5"} {"signature": "def ppoi_as_str(self):", "body": "return \"\" % (str(self.ppoi[]).replace('', ''),str(self.ppoi[]).replace('', ''))", "docstring": "Return PPOI value as a string.", "id": "f3820:c2:m1"} {"signature": "def process_image(self, image, image_format, save_kwargs={}):", "body": "imagefile = BytesIO()inv_image = ImageOps.invert(image)inv_image.save(imagefile,**save_kwargs)return imagefile", "docstring": "Return a BytesIO instance of `image` with inverted colors.", "id": "f3821:c2:m0"} {"signature": "def get_filename_key(self):", "body": "return \"\".format(key=self.filename_key,ppoi=self.ppoi_as_str())", "docstring": "Return the filename key for cropped images.", "id": "f3821:c0:m0"} {"signature": "def get_usage(self, subcommand):", "body": "return self.usage or ''.format(subcommand=subcommand)", "docstring": "Return a brief description of how to use this command, by\ndefault from the attribute ``self.help``.", "id": "f3830:c0:m1"} {"signature": "def __init__(self, app, client):", "body": "self.app = appself.client = client", "docstring": ":param app: WSGI application object\n:param client: Instance of StackSentinel", "id": "f3838:c2:m0"} {"signature": "@staticmethoddef _serialize_object(obj):", "body": "try:return repr(obj)except:return ''", "docstring": "When the state of an exception includes something that we can't pickle, show something useful instead.", "id": "f3838:c1:m1"} {"signature": "def handle_exception(self, exc_info=None, state=None, tags=None, return_feedback_urls=False,dry_run=False):", "body": "if not exc_info:exc_info = sys.exc_info()if exc_info is None:raise StackSentinelError(\"\")(etype, value, tb) = exc_infotry:msg = value.args[]except:msg = repr(value)if not isinstance(tags, list):tags = [tags]limit = Nonenew_tb = []n = while tb is not None and (limit is None or n < limit):f = tb.tb_framelineno = tb.tb_linenoco = f.f_codefilename = co.co_filenamename = co.co_nametb = tb.tb_nextn = n + new_tb.append({'': lineno, '': filename, '': name})if state is None:state = {}if '' not in state:try:state[''] = self._get_sys_info()except Exception as e:state[''] = '' % eif '' not in state:try:state[''] = self._get_machine_info()except Exception as e:state[''] = '' % eif tags is None:tags = []if sys.version_info.major > :error_type = str(etype.__name__)error_message = str(value)else:error_type = unicode(etype.__name__)error_message = unicode(value)send_error_args = dict(error_type=error_type,error_message=error_message,traceback=new_tb,environment=self.environment,state=state,tags=self.tags + tags,return_feedback_urls=return_feedback_urls)if dry_run:return send_error_argselse:return self.send_error(**send_error_args)", "docstring": "Call this method from within a try/except clause to generate a call to Stack Sentinel.\n\n:param exc_info: Return value of sys.exc_info(). If you pass None, handle_exception will call sys.exc_info() itself\n:param state: Dictionary of state information associated with the error. This could be form data, cookie data, whatnot. NOTE: sys and machine are added to this dictionary if they are not already included.\n:param tags: Any string tags you want associated with the exception report.\n:param return_feedback_urls: If True, Stack Sentinel will return feedback URLs you can present to the user for extra debugging information.\n:param dry_run: If True, method will not actively send in error information to API. Instead, it will return a request object and payload. 
Used in unittests.", "id": "f3838:c1:m2"} {"signature": "def __init__(self, account_token, project_token, environment, tags=None,endpoint=\"\"):", "body": "self.account_token = account_tokenself.project_token = project_tokenself.endpoint = endpointself.environment = environmentif tags:self.tags = tagselse:self.tags = []", "docstring": ":param account_token: Your account token, as supplied by StackSentinel\n:param project_token: Your project token, as supplied by StackSentinel\n:param environment: The environment of the project (eg, \"production\", \"devel\", etc)\n:param tags: Any tags you want associated with *all* errors sent using this client.\n:param endpoint: API endpoint. Defaults to StackSentinel backend.", "id": "f3838:c1:m0"} {"signature": "def as_string(self, default_from=None):", "body": "encoding = self.charset or ''attachments = self.attachments or []if len(attachments) == and not self.html:msg = self._mimetext(self.body)elif len(attachments) > and not self.html:msg = MIMEMultipart()msg.attach(self._mimetext(self.body))else:msg = MIMEMultipart()alternative = MIMEMultipart('')alternative.attach(self._mimetext(self.body, ''))alternative.attach(self._mimetext(self.html, ''))msg.attach(alternative)if self.charset:msg[''] = Header(self.subject, encoding)else:msg[''] = self.subjectsender = self.sender or default_fromif sender is not None:msg[''] = sanitize_address(sender, encoding)msg[''] = ''.join(list(set(sanitize_addresses(self.recipients, encoding))))msg[''] = formatdate(self.date, localtime=True)msg[''] = self.msgIdif self.cc:msg[''] = ''.join(list(set(sanitize_addresses(self.cc, encoding))))if self.reply_to:msg[''] = sanitize_address(self.reply_to, encoding)if self.extra_headers:for k, v in self.extra_headers.items():msg[k] = vfor attachment in attachments:f = MIMEBase(*attachment.content_type.split(''))f.set_payload(attachment.data)encode_base64(f)try:attachment.filename and attachment.filename.encode('')except UnicodeEncodeError:filename = attachment.filenameif not PY3:filename = filename.encode('')f.add_header('', attachment.disposition,filename=('', '', filename))else:f.add_header('', '' %(attachment.disposition, attachment.filename))for key, value in attachment.headers:f.add_header(key, value)msg.attach(f)return msg.as_string()", "docstring": "Creates the email", "id": "f3840:c4:m3"} {"signature": "def send(self, message):", "body": "with self.connect() as connection:message.send(connection)", "docstring": "Sends a single message instance. 
If TESTING is True, the message will\n not actually be sent.\n\n :param message: a Message instance.", "id": "f3840:c5:m6"} {"signature": "def send(self, connection):", "body": "connection.send(self)", "docstring": "Verifies and sends the message.", "id": "f3840:c4:m7"} {"signature": "def attach(self,filename=None,content_type=None,data=None,disposition=None,headers=None):", "body": "self.attachments.append(Attachment(filename, content_type, data, disposition, headers))", "docstring": "Adds an attachment to the message.\n\n :param filename: filename of attachment\n :param content_type: file mimetype\n :param data: the raw file data\n :param disposition: content-disposition (if any)", "id": "f3840:c4:m9"} {"signature": "def shortDescription(self):", "body": "doc = self.id()[self.id().rfind('')+:]return \"\" % (self.__class__.__name__, doc)", "docstring": "Gets the one-liner description to be displayed.\nSource:\nhttp://erikzaadi.com/2012/09/13/inheritance-within-python-unit-tests/", "id": "f3845:c0:m0"} {"signature": "def iter_verbs(self, c):", "body": "for verb in ['', '', '']:yield getattr(c, verb)", "docstring": "A simple helper method to iterate through a range of\n HTTP verbs and return the test_client bound instance,\n keeping our tests as DRY as possible.", "id": "f3845:c0:m1"} {"signature": "@app.route(\"\", methods=['', ''])def list_users(request):", "body": "return json({\"\": \"\"})", "docstring": "Since the path matches the regular expression r'/api/*', this resource\nautomatically has CORS headers set. The expected result is as follows:\n\n$ curl --include -X GET http://127.0.0.1:5000/api/v1/users/ \\\n --header Origin:www.examplesite.com\nHTTP/1.0 200 OK\nAccess-Control-Allow-Headers: Content-Type\nAccess-Control-Allow-Origin: *\nContent-Length: 21\nContent-Type: application/json\nDate: Sat, 09 Aug 2014 00:26:41 GMT\nServer: Werkzeug/0.9.4 Python/2.7.8\n\n{\n \"success\": true\n}", "id": "f3863:m1"} {"signature": "def cross_origin(app, *args, **kwargs):", "body": "_options = kwargs_real_decorator = cors.decorate(app, *args, run_middleware=False, with_context=False, **kwargs)def wrapped_decorator(f):spf = SanicPluginsFramework(app) try:plugin = spf.register_plugin(cors, skip_reg=True)except ValueError as e:assert e.args and len(e.args) > plugin = e.args[]context = cors.get_context_from_spf(spf)log = context.loglog(logging.DEBUG, \"\".format(str(f), str(_options)))return _real_decorator(f)return wrapped_decorator", "docstring": "This function is the decorator used to wrap a Sanic route.\nIn the simplest case, simply use the default parameters to allow all\norigins in what is the most permissive configuration. 
If this method\nmodifies state or performs authentication which may be brute-forced, you\nshould add some degree of protection, such as Cross Site Forgery\nRequest protection.\n\n:param origins:\n The origin, or list of origins to allow requests from.\n The origin(s) may be regular expressions, case-sensitive strings,\n or else an asterisk\n\n Default : '*'\n:type origins: list, string or regex\n\n:param methods:\n The method or list of methods which the allowed origins are allowed to\n access for non-simple requests.\n\n Default : [GET, HEAD, POST, OPTIONS, PUT, PATCH, DELETE]\n:type methods: list or string\n\n:param expose_headers:\n The header or list which are safe to expose to the API of a CORS API\n specification.\n\n Default : None\n:type expose_headers: list or string\n\n:param allow_headers:\n The header or list of header field names which can be used when this\n resource is accessed by allowed origins. The header(s) may be regular\n expressions, case-sensitive strings, or else an asterisk.\n\n Default : '*', allow all headers\n:type allow_headers: list, string or regex\n\n:param supports_credentials:\n Allows users to make authenticated requests. If true, injects the\n `Access-Control-Allow-Credentials` header in responses. This allows\n cookies and credentials to be submitted across domains.\n\n :note: This option cannot be used in conjuction with a '*' origin\n\n Default : False\n:type supports_credentials: bool\n\n:param max_age:\n The maximum time for which this CORS request maybe cached. This value\n is set as the `Access-Control-Max-Age` header.\n\n Default : None\n:type max_age: timedelta, integer, string or None\n\n:param send_wildcard: If True, and the origins parameter is `*`, a wildcard\n `Access-Control-Allow-Origin` header is sent, rather than the\n request's `Origin` header.\n\n Default : False\n:type send_wildcard: bool\n\n:param vary_header:\n If True, the header Vary: Origin will be returned as per the W3\n implementation guidelines.\n\n Setting this header when the `Access-Control-Allow-Origin` is\n dynamically generated (e.g. when there is more than one allowed\n origin, and an Origin than '*' is returned) informs CDNs and other\n caches that the CORS headers are dynamic, and cannot be cached.\n\n If False, the Vary header will never be injected or altered.\n\n Default : True\n:type vary_header: bool\n\n:param automatic_options:\n Only applies to the `cross_origin` decorator. 
If True, Sanic-CORS will\n override Sanic's default OPTIONS handling to return CORS headers for\n OPTIONS requests.\n\n Default : True\n:type automatic_options: bool", "id": "f3866:m0"} {"signature": "def serialize_options(opts):", "body": "options = (opts or {}).copy()for key in opts.keys():if key not in DEFAULT_OPTIONS:LOG.warning(\"\", key)options[''] = sanitize_regex_param(options.get(''))options[''] = sanitize_regex_param(options.get(''))if r'' in options[''] and options[''] and options['']:raise ValueError(\"\"\"\"\"\")serialize_option(options, '')serialize_option(options, '', upper=True)if isinstance(options.get(''), timedelta):options[''] = str(int(options[''].total_seconds()))return options", "docstring": "A helper method to serialize and processes the options dictionary.", "id": "f3868:m16"} {"signature": "def set_cors_headers(req, resp, context, options):", "body": "try:request_context = context.request[id(req)]except AttributeError:LOG.debug(\"\")return respevaluated = request_context.get(SANIC_CORS_EVALUATED, False)if evaluated:LOG.debug('')return respif resp is None:return Noneif resp.headers is None:resp.headers = CIMultiDict()headers_to_set = get_cors_headers(options, req.headers, req.method)LOG.debug('', str(headers_to_set))try:resp.headers.extend(headers_to_set)except Exception as e1:for k, v in headers_to_set.items():try:resp.headers.add(k, v)except Exception as e2:resp.headers[k] = vreturn resp", "docstring": "Performs the actual evaluation of Sanic-CORS options and actually\nmodifies the response object.\n\nThis function is used both in the decorator and the after_request\ncallback\n:param sanic.request.Request req:", "id": "f3868:m5"} {"signature": "def make_osm_query(query):", "body": "osm_url = ''req = requests.get(osm_url, params={'': query})req.raise_for_status()return req.json()", "docstring": "Make a request to OSM and return the parsed JSON.\n\nParameters\n----------\nquery : str\n A string in the Overpass QL format.\n\nReturns\n-------\ndata : dict", "id": "f3881:m2"} {"signature": "def pdna_network_from_bbox(lat_min=None, lng_min=None, lat_max=None, lng_max=None, bbox=None,network_type='', two_way=True,timeout=, memory=None, max_query_area_size= * * * ):", "body": "nodes, edges = network_from_bbox(lat_min=lat_min, lng_min=lng_min,lat_max=lat_max, lng_max=lng_max,bbox=bbox, network_type=network_type,two_way=two_way, timeout=timeout,memory=memory,max_query_area_size=max_query_area_size)return Network(nodes[''], nodes[''],edges[''], edges[''], edges[['']])", "docstring": "Make a Pandana network from a bounding lat/lon box\nrequest to the Overpass API. Distance will be in the default units meters.\n\nParameters\n----------\nlat_min, lng_min, lat_max, lng_max : float\nbbox : tuple\n Bounding box formatted as a 4 element tuple:\n (lng_max, lat_min, lng_min, lat_max)\nnetwork_type : {'walk', 'drive'}, optional\n Specify whether the network will be used for walking or driving.\n A value of 'walk' attempts to exclude things like freeways,\n while a value of 'drive' attempts to exclude things like\n bike and walking paths.\ntwo_way : bool, optional\n Whether the routes are two-way. 
If True, node pairs will only\n occur once.\ntimeout : int, optional\n the timeout interval for requests and to pass to Overpass API\nmemory : int, optional\n server memory allocation size for the query, in bytes.\n If none, server will use its default allocation size\nmax_query_area_size : float, optional\n max area for any part of the geometry, in the units the geometry is in\n\nReturns\n-------\nnetwork : pandana.Network", "id": "f3881:m0"} {"signature": "@classmethoddef from_hdf5(cls, filename):", "body": "return ph5.network_from_pandas_hdf5(cls, filename)", "docstring": "Load a previously saved Network from a Pandas HDF5 file.\n\nParameters\n----------\nfilename : str\n\nReturns\n-------\nnetwork : pandana.Network", "id": "f3884:c0:m1"} {"signature": "def save_hdf5(self, filename, rm_nodes=None):", "body": "ph5.network_to_pandas_hdf5(self, filename, rm_nodes)", "docstring": "Save network data to a Pandas HDF5 file.\n\nOnly the nodes and edges of the actual network are saved,\npoints-of-interest and data attached to nodes are not saved.\n\nParameters\n----------\nfilename : str\nrm_nodes : array_like\n A list, array, Index, or Series of node IDs that should *not*\n be saved as part of the Network.", "id": "f3884:c0:m2"} {"signature": "def get_node_ids(self, x_col, y_col, mapping_distance=None):", "body": "xys = pd.DataFrame({'': x_col, '': y_col})distances, indexes = self.kdtree.query(xys.as_matrix())indexes = np.transpose(indexes)[]distances = np.transpose(distances)[]node_ids = self.nodes_df.iloc[indexes].indexdf = pd.DataFrame({\"\": node_ids, \"\": distances},index=xys.index)if mapping_distance is not None:df = df[df.distance <= mapping_distance]return df.node_id", "docstring": "Assign node_ids to data specified by x_col and y_col\n\nParameters\n----------\nx_col : Pandas series (float)\n A Pandas Series where values specify the x (e.g. longitude)\n location of dataset.\ny_col : Pandas series (float)\n A Pandas Series where values specify the y (e.g. latitude)\n location of dataset. x_col and y_col should use the same index.\nmapping_distance : float, optional\n The maximum distance that will be considered a match between the\n x, y data and the nearest node in the network. This will usually\n be a distance unit in meters however if you have customized the\n impedance this could be in other units such as utility or time\n etc. If not specified, every x, y coordinate will be mapped to\n the nearest node.\n\nReturns\n-------\nnode_ids : Pandas series (int)\n Returns a Pandas Series of node_ids for each x, y in the\n input data. The index is the same as the indexes of the\n x, y input data, and the values are the mapped node_ids.\n If mapping distance is not passed and if there are no nans in the\n x, y data, this will be the same length as the x, y data.\n If the mapping is imperfect, this function returns all the\n input x, y's that were successfully mapped to node_ids.", "id": "f3884:c0:m13"} {"signature": "def low_connectivity_nodes(self, impedance, count, imp_name=None):", "body": "self.set(self.node_ids.to_series(), name='')agg = self.aggregate(impedance, type='', imp_name=imp_name, name='')return np.array(agg[agg < count].index)", "docstring": "Identify nodes that are connected to fewer than some threshold\nof other nodes within a given distance.\n\nParameters\n----------\nimpedance : float\n Distance within which to search for other connected nodes. 
This\n will usually be a distance unit in meters however if you have\n customized the impedance this could be in other units such as\n utility or time etc.\ncount : int\n Threshold for connectivity. If a node is connected to fewer\n than this many nodes within `impedance` it will be identified\n as \"low connectivity\".\nimp_name : string, optional\n The impedance name to use for the aggregation on this network.\n Must be one of the impedance names passed in the constructor of\n this object. If not specified, there must be only one impedance\n passed in the constructor, which will be used.\n\nReturns\n-------\nnode_ids : array\n List of \"low connectivity\" node IDs.", "id": "f3884:c0:m17"} {"signature": "def accuracy(conf_matrix):", "body": "total, correct = , for true_response, guess_dict in list(conf_matrix.items()):for guess, count in list(guess_dict.items()):if true_response == guess:correct += counttotal += countreturn correct/total", "docstring": "Given a confusion matrix, returns the accuracy.\nAccuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml", "id": "f3889:m0"} {"signature": "def list_to_tf_input(data, response_index, num_outcomes):", "body": "matrix = np.matrix([row[:response_index] + row[response_index+:] for row in data])outcomes = np.asarray([row[response_index] for row in data], dtype=np.uint8)return matrix, outcomes", "docstring": "Separates the outcome feature from the data.", "id": "f3905:m0"} {"signature": "def expand_and_standardize_dataset(response_index, response_header, data_set, col_vals, headers, standardizers, feats_to_ignore, columns_to_expand, outcome_trans_dict):", "body": "modified_set = []for row_index, row in enumerate(data_set):new_row = []for col_index, val in enumerate(row):header = headers[col_index]if col_index == response_index:new_outcome = outcome_trans_dict[val]new_row.append(new_outcome)elif header in feats_to_ignore:passelif header in columns_to_expand:for poss_val in col_vals[header]:if val == poss_val:new_cat_val = else:new_cat_val = -new_row.append(new_cat_val)else:new_cont_val = float((val - standardizers[header]['']) / standardizers[header][''])new_row.append(new_cont_val)modified_set.append(new_row)expanded_headers = []for header in headers:if header in feats_to_ignore:passelif (header in columns_to_expand) and (header is not response_header):for poss_val in col_vals[header]:new_header = ''.format(header,poss_val)expanded_headers.append(new_header)else:expanded_headers.append(header)return modified_set, expanded_headers", "docstring": "Standardizes continuous features and expands categorical features.", "id": "f3908:m1"} {"signature": "def list_to_tf_input(data, response_index, num_outcomes):", "body": "matrix = np.matrix([row[:response_index] + row[response_index+:] for row in data])outcomes = np.asarray([row[response_index] for row in data], dtype=np.uint8)outcomes_onehot = (np.arange(num_outcomes) == outcomes[:, None]).astype(np.float32)return matrix, outcomes_onehot", "docstring": "Separates the outcome feature from the data and creates the onehot vector for each row.", "id": "f3911:m0"} {"signature": "def __init__(self, all_data, feature_to_repair, repair_level, kdd, features_to_ignore=[]):", "body": "self.all_data = all_dataself.feature_to_repair = feature_to_repairself.repair_level = repair_levelself.kdd = kddself.features_to_ignore = features_to_ignore", "docstring": "all_data should be a list of rows (ie, a list of lists) composing the entire\ntest and training dataset. 
Headers should not be included in data sets.\n\nfeature_to_repair should be the index of the feature to repair. (i.e., 0 to k)\nwhere k is the number of features in the dataset.\n\nrepair_level should be a float between [0,1] representing the level of repair.\n\nfeatures_to_ignore should be a list of feature indexes that should be ignored.", "id": "f3941:c0:m0"} {"signature": "@abstractmethoddef repair(self, data_to_repair):", "body": "pass", "docstring": "data_to_repair is the list of rows that actually should be repaired.", "id": "f3941:c0:m1"} {"signature": "def get_median(values, kdd):", "body": "if not values:raise Exception(\"\")sorted_values = deepcopy(values)sorted_values.sort() if kdd:return sorted_values[len(values)/]else:if len(values) % == :return sorted_values[len(values)/-]else:return sorted_values[len(values)/]", "docstring": "Given an unsorted list of numeric values, return median value (as a float).\nNote that in the case of even-length lists of values, we apply the value to\nthe left of the center to be the median (such that the median can only be\na value from the list of values).\nEg: get_median([1,2,3,4]) == 2, not 2.5.", "id": "f3945:m0"} {"signature": "def _blame_line(self, traceback):", "body": "key = Noneblamed_entry = Noneemail_recipients = []for stack_line in traceback:line_type = self._get_line_type(stack_line)if line_type == api_ttypes.LineType.THIRDPARTY_WHITELIST:return None, None, None, Trueelif line_type in [api_ttypes.LineType.DEFAULT, api_ttypes.LineType.KNOWN_ERROR]:filepath = self._get_basepath(stack_line.filename)entry = api_ttypes.CodeIdentifier(filepath, stack_line.function_name, stack_line.text)blamed_entry = entrykey = api_ttypes.ErrorKey(filepath, stack_line.line_number, stack_line.function_name, stack_line.text)if filepath in self.watch_all_errors:email_recipients.extend(self.watch_all_errors[filepath])return (key, blamed_entry, email_recipients, False)", "docstring": "Figures out which line in traceback is to blame for the error.\n Returns a 3-tuple of (ErrorKey, StackTraceEntry, [email recipients])", "id": "f3985:c0:m8"} {"signature": "def migrate_thrift_obj(self, obj):", "body": "if not hasattr(obj, \"\"):returnobj_key_set = set(obj.__dict__.keys())thrift_field_map = {t[]: t[] for t in obj.thrift_spec if t}obj.__dict__.update({f: copy.copy(thrift_field_map[f]) for f in set(thrift_field_map.keys()) - obj_key_set})for value in obj.__dict__.values():self.migrate_thrift_obj(value)", "docstring": "Helper function that can be called when serializing/deserializing thrift objects whose definitions\n have changed; we need to make sure we initialize the new attributes to their default value", "id": "f3996:c0:m4"} {"signature": "@abc.abstractmethoddef iteritems(self):", "body": "pass", "docstring": "Should return iterator of tuples (key, value) for all entries for the given self.partition", "id": "f3996:c0:m5"} {"signature": "def open(self):", "body": "pass", "docstring": "Called to create connection to storage", "id": "f3996:c0:m1"} {"signature": "@propertydef detail(self):", "body": "if hasattr(self, ''):return self._detailelse:self.get_detail()return self._detail", "docstring": "Personal information; if ``get_detail()`` has not been called yet, it will be called automatically\n\n:return: information of student\n:rtype: dict", "id": "f4089:c1:m9"} {"signature": "@propertydef _echo(self):", "body": "return random.randint(, )", "docstring": "Generate a random number, used for data verification\n\n:return:\n:rtype: 
int", "id": "f4089:c1:m4"} {"signature": "def get_comment_lesson_info(self): ", "body": "echo = self._echoresponse = self._post('',data=self._aodata(echo, ['', '', '', '', '']), )if self._check_response(response, echo=echo):return response['']['']else:self._unexpected(response)", "docstring": "\u83b7\u53d6\u6559\u5b66\u8bc4\u4f30\u5185\u6240\u6709\u9700\u8981\u8bfe\u7a0b\n\n:return: \u8fd4\u56de\u6240\u4ee5\u6709\u9700\u8981\u8fdb\u884c\u6559\u5b66\u8bc4\u4f30\u7684\u8bfe\u7a0b\n:rtype: list", "id": "f4089:c1:m22"} {"signature": "def login(self):", "body": "if not hasattr(self, ''):self.last_connect = time.time()s = requests.session()s.get('')data = {'': self.student_id,'': self.password_md5}r6 = s.post('', headers={'': self._ua},data=data)if r6.text == '':return selse:s.close()raise AuthFailure(r6.text)", "docstring": "\u767b\u9646\u7cfb\u7edf,\u8fd4\u56de\u4e00\u4e2arequests\u7684session\u5bf9\u8c61\n\n:return: session with login cookies\n:rtype: requests.sessions.Session", "id": "f4089:c1:m5"} {"signature": "def __init__(self, student_id, password, user_agent=None):", "body": "self.student_id = student_idself.password = passwordself.password_md5 = hashlib.md5(self.password.encode('')).hexdigest()self.post_headers = {'': '','': ''}self._ua = ''''if user_agent:self._ua = user_agentself.session = self.login()", "docstring": ":param student_id: student_id of jw system\n:type student_id: str\n:param password: password\n:type password: str\n:param user_agent: User-agent you want to use when requesting the website, default ua is chrome 60.0.3112.113\n:type user_agent: str", "id": "f4089:c1:m0"} {"signature": "def get_raw_now_score(self):", "body": "echo = self._echoresponse = self._post(\"\",data=self._aodata(echo,columns=[\"\", \"\", \"\", \"\", \"\",\"\", \"\",\"\",\"\"]))if self._check_response(response, echo):self._raw_now_score = responsereturn self._raw_now_scoreelse:self._unexpected(response)", "docstring": ":rtype: dict\n:return: list[dict]", "id": "f4089:c1:m13"} {"signature": "@classmethoddef from_yaml(cls, defaults, **kwargs):", "body": "if \"\" not in defaults:kwargs[\"\"] = Nonedefaults = copy.deepcopy(defaults)return cls(defaults=defaults,token=kwargs.pop(\"\"),directory=kwargs.pop(\"\"),**kwargs)", "docstring": "Creates a new instance of a rule by merging two dictionaries.\n\n This allows for independant configuration files to be merged\n into the defaults.", "id": "f4091:c2:m0"} {"signature": "def execute_actions(self, cwd):", "body": "self._execute_globals(cwd)for action in self.actions:logger.info(\"\".format(action))p = subprocess.Popen(action, shell=True, cwd=cwd)p.wait()", "docstring": "Iterates over the actions and executes them in order.", "id": "f4091:c1:m4"} {"signature": "def main():", "body": "parser = create_parser()args = parser.parse_args()check_arguments(args, parser)run(parser, args)", "docstring": "Initialize and run command line interface.", "id": "f4106:m7"} {"signature": "def clean_code(code, comments=True, macros=False, pragmas=False):", "body": "if macros or pragmas:lines = code.split('')in_macro = Falsein_pragma = Falsefor i in range(len(lines)):l = lines[i].strip()if macros and (l.startswith('') and not l.startswith('') or in_macro):lines[i] = ''in_macro = l.endswith('')if pragmas and (l.startswith('') or in_pragma):lines[i] = ''in_pragma = l.endswith('')code = ''.join(lines)if comments:idx = comment_start = Nonewhile idx < len(code) - :if comment_start is None and code[idx:idx + ] == '':end_idx = code.find('', idx)code = code[:idx] + code[end_idx:]idx 
-= end_idx - idxelif comment_start is None and code[idx:idx + ] == '':comment_start = idxelif comment_start is not None and code[idx:idx + ] == '':code = (code[:comment_start] +'' * code[comment_start:idx].count('') +code[idx + :])idx -= idx - comment_startcomment_start = Noneidx += return code", "docstring": "Naive comment and macro striping from source code\n\n:param comments: If True, all comments are stripped from code\n:param macros: If True, all macros are stripped from code\n:param pragmas: If True, all pragmas are stripped from code\n\n:return: cleaned code. Line numbers are preserved with blank lines,\nand multiline comments and macros are supported. BUT comment-like\nstrings are (wrongfully) treated as comments.", "id": "f4107:m0"} {"signature": "def report(self, output_file=sys.stdout):", "body": "if self.verbose > :print(\"\", file=output_file)print(self.results[''], file=output_file)print('', file=output_file)if self.verbose > :print(''.format(self.results['']),file=output_file)print(''.format(self.results['']), file=output_file)print('', str(self.results['']), file=output_file)print('', str(self.results['']), file=output_file)print(''.format(self.results[''][self._args.unit]),file=output_file)print(''.format(self.results['']), file=output_file)print(''.format(self.results['']), file=output_file)", "docstring": "Print generated model data in human readable format.", "id": "f4108:c1:m4"} {"signature": "def report(self, output_file=sys.stdout):", "body": "report = ''if self.verbose > :self._CPU.report()self._data.report()report += ''.format(self.results[''],self.results[''],''.join([''.format(i[]) for i in self.results['']]))if self._args.cores > :report += \"\"report += ''.format(self.results[''][self._args.unit])report += ''.format(max(self.results[''], self.results['']),''.join([''.format(max(sum([x[] for x in self.results[''][:i+]]) +self.results[''], self.results['']))for i in range(len(self.results['']))]))if self._args.cores > :report += \"\"report += ''.format(self.results[''])if self.results['']:report += \"\".format(self.results['']['']) +\"\"report += \"\".format(self.results[''][''][self._args.unit],''.join(self.results['']['']))if self.results['']:report += \"\"\"\"if self.machine[''] > self.machine['']:report += \"\" + (len(self._args.unit) - ) * '' + '' +'' * (self.machine['']-) + ''report += \"\" + (len(self._args.unit)+)*'' + \"\" + ''.join([''.format(s['']) for s in self.results['']]) + ''report += \"\".format(self._args.unit) + ''.join([''.format(float(s[''][self._args.unit]))for s in self.results['']]) + ''print(report, file=output_file)if self._args and self._args.ecm_plot:assert plot_support, \"\"fig = plt.figure(frameon=False)self.plot(fig)", "docstring": "Print generated model data in human readable format.", "id": "f4108:c2:m3"} {"signature": "def plot(self, fig=None):", "body": "if not fig:fig = plt.gcf()fig.subplots_adjust(left=, right=, top=, bottom=)ax = fig.add_subplot(, , )sorted_overlapping_ports = sorted([(p, self.results[''][p]) for p in self.machine['']],key=lambda x: x[])yticks_labels = []yticks = []xticks_labels = []xticks = []height = i = colors = ([( / , / , / )] +[( / , / , / )] * (len(sorted_overlapping_ports) - ))for p, c in sorted_overlapping_ports:ax.barh(i, c, height, align='', color=colors.pop(),edgecolor=(, , ), linestyle='')if i == len(sorted_overlapping_ports) - :ax.text(c / , i, '', ha='', va='')yticks_labels.append(p)yticks.append(i)i += 
xticks.append(sorted_overlapping_ports[-][])xticks_labels.append(''.format(sorted_overlapping_ports[-][]))y = colors = [( / , / , / )] * (len(self.results[''])) +[( / , / , / )]for k, v in [('', self.results[''])] + self.results['']:ax.barh(i, v, height, y, align='', color=colors.pop())ax.text(y + v / , i, '' + k + '', ha='', va='')xticks.append(y + v)xticks_labels.append(''.format(y + v))y += vyticks_labels.append('')yticks.append(i)ax.tick_params(axis='', which='', left='', right='')ax.tick_params(axis='', which='', top='')ax.set_xlabel('')ax.set_ylabel('')ax.set_yticks(yticks)ax.set_yticklabels(yticks_labels)ax.set_xticks(xticks)ax.set_xticklabels(xticks_labels, rotation='')ax.xaxis.grid(alpha=, linestyle='')fig.savefig(self._args.ecm_plot)", "docstring": "Plot visualization of model prediction.", "id": "f4108:c2:m4"} {"signature": "def analyze(self):", "body": "self.calculate_cache_access()self.calculate_cycles()self.results[''] = sum(self.kernel._flops.values())return self.results", "docstring": "Run complete analysis and return results.", "id": "f4108:c0:m3"} {"signature": "def __init__(self, kernel, machine, args=None, parser=None, asm_block='',pointer_increment='', verbose=):", "body": "self.kernel = kernelself.machine = machineself._args = argsself._parser = parserself.results = {}if args:self.asm_block = self._args.asm_blockself.pointer_increment = self._args.pointer_incrementself.verbose = self._args.verboseelse:self.asm_block = asm_blockself.pointer_increment = pointer_incrementself.verbose = verboseif self.asm_block not in ['', '']:try:self.asm_block = int(args.asm_block)except ValueError:parser.error('')if self.pointer_increment not in ['', '', '']:try:self.pointer_increment = int(args.pointer_increment)except ValueError:parser.error('''')", "docstring": "Create Execution-Cache-Memory model from kernel and machine objects.\n\n*kernel* is a Kernel object\n*machine* describes the machine (cpu, cache and memory) characteristics\n*args* (optional) are the parsed arguments from the command line\nif *args* is given, *parser* also has to be provided\n\nIf *args* is None, *asm_block*, *pointer_increment* and *verbose* will be used, otherwise\n*args* takes precedence.", "id": "f4108:c1:m1"} {"signature": "def blocking(indices, block_size, initial_boundary=):", "body": "blocks = []for idx in indices:bl_idx = (idx-initial_boundary)//float(block_size)if bl_idx not in blocks:blocks.append(bl_idx)blocks.sort()return blocks", "docstring": "Split list of integers into blocks of block_size and return block indices.\n\nFirst block element will be located at initial_boundary (default 0).\n\n>>> blocking([0, -1, -2, -3, -4, -5, -6, -7, -8, -9], 8)\n[0,-1]\n>>> blocking([0], 8)\n[0]\n>>> blocking([0], 8, initial_boundary=32)\n[-4]", "id": "f4108:m1"} {"signature": "@classmethoddef configure_arggroup(cls, parser):", "body": "parser.add_argument('',help='')", "docstring": "Configure argument parser.", "id": "f4108:c2:m0"} {"signature": "def conv_cy(self, cy_cl):", "body": "if not isinstance(cy_cl, PrefixedUnit):cy_cl = PrefixedUnit(cy_cl, '', '')clock = self.machine['']element_size = self.kernel.datatypes_size[self.kernel.datatype]elements_per_cacheline = int(self.machine['']) // element_sizeit_s = clock/cy_cl*elements_per_cachelineit_s.unit = ''flops_per_it = sum(self.kernel._flops.values())performance = it_s*flops_per_itperformance.unit = ''cy_it = cy_cl*elements_per_cachelinecy_it.unit = ''return {'': it_s,'': cy_cl,'': cy_it,'': performance}", "docstring": "Convert cycles (cy/CL) to other units, 
such as FLOP/s or It/s.", "id": "f4108:c1:m3"} {"signature": "def analyze(self):", "body": "try:incore_analysis, asm_block = self.kernel.iaca_analysis(micro_architecture=self.machine[''],asm_block=self.asm_block,pointer_increment=self.pointer_increment,verbose=self.verbose > )except RuntimeError as e:print(\"\" + str(e))sys.exit()block_throughput = incore_analysis['']port_cycles = incore_analysis['']uops = incore_analysis['']elements_per_block = abs(asm_block['']// self.kernel.datatypes_size[self.kernel.datatype])block_size = elements_per_block*self.kernel.datatypes_size[self.kernel.datatype]try:block_to_cl_ratio = float(self.machine[''])/block_sizeexcept ZeroDivisionError as e:print(\"\", e, file=sys.stderr)sys.exit()port_cycles = dict([(i[], i[]*block_to_cl_ratio) for i in list(port_cycles.items())])uops = uops*block_to_cl_ratiocl_throughput = block_throughput*block_to_cl_ratioT_OL = max([v for k, v in list(port_cycles.items())if k in self.machine['']['']])T_nOL = max([v for k, v in list(port_cycles.items())if k in self.machine['']['']])if T_nOL < cl_throughput:T_OL = cl_throughputself.results = {'': port_cycles,'': self.conv_cy(cl_throughput),'': uops,'': T_nOL,'': T_OL,'': incore_analysis[''],'': elements_per_block,'': asm_block[''],'': sum(self.kernel._flops.values())}return self.results", "docstring": "Run complete analysis and return results.", "id": "f4108:c1:m2"} {"signature": "def calculate_cache_access(self):", "body": "self.results.update({'': [], '': self.predictor.get_misses(),'': self.predictor.get_hits(),'': self.predictor.get_evicts(),'': self.predictor.get_infos()})", "docstring": "Dispatch to cache predictor to get cache stats.", "id": "f4108:c0:m1"} {"signature": "def calculate_cache_access(self):", "body": "element_size = self.kernel.datatypes_size[self.kernel.datatype]results = {'': {}}def sympy_compare(a, b):c = for i in range(min(len(a), len(b))):s = a[i] - b[i]if sympy.simplify(s > ):c = -elif sympy.simplify(s == ):c = else:c = if c != :breakreturn caccesses = defaultdict(list)sympy_accesses = defaultdict(list)for var_name in self.kernel.variables:for r in self.kernel.sources.get(var_name, []):if r is None:continueaccesses[var_name].append(r)sympy_accesses[var_name].append(self.kernel.access_to_sympy(var_name, r))for w in self.kernel.destinations.get(var_name, []):if w is None:continueaccesses[var_name].append(w)sympy_accesses[var_name].append(self.kernel.access_to_sympy(var_name, w))accesses[var_name].sort(key=cmp_to_key(sympy_compare), reverse=True)results[''] = accessesresults[''] = sympy_accessesfor dimension in range(, len(list(self.kernel.get_loop_stack()))+):results[''][dimension] = {}slices = defaultdict(list)slices_accesses = defaultdict(list)for var_name in accesses:for a in accesses[var_name]:slice_id = tuple([var_name, tuple(a[:-dimension])])slices[slice_id].append(a)slices_accesses[slice_id].append(self.kernel.access_to_sympy(var_name, a))results[''][dimension][''] = slicesresults[''][dimension][''] = slices_accessesslices_distances = defaultdict(list)for k, v in slices_accesses.items():for i in range(, len(v)):slices_distances[k].append((v[i] - v[i-]).simplify())results[''][dimension][''] = slices_distancesfor dist in chain(*slices_distances.values()):if any([s not in self.kernel.constants.keys() for s in dist.free_symbols]):raise ValueError(\"\"+str(dist))slices_sum = sum([sum(dists) for dists in slices_distances.values()])results[''][dimension][''] = slices_sumdef FuckedUpMax(*args):if len(args) == :return args[]args = [a.expand() for a in 
args]max_symbols = max([len(a.free_symbols) for a in args])args = list(filter(lambda a: len(a.free_symbols) == max_symbols, args))if max_symbols == :return sympy.Max(*args)max_coeffs = for a in args:for s in a.free_symbols:max_coeffs = max(max_coeffs, len(sympy.Poly(a, s).all_coeffs()))def coeff_filter(a):return max(, ,*[len(sympy.Poly(a, s).all_coeffs()) for s in a.free_symbols]) == max_coeffsargs = list(filter(coeff_filter, args))m = sympy.Max(*args)return mslices_max = FuckedUpMax(sympy.Integer(),*[FuckedUpMax(*dists) for dists in slices_distances.values()])results[''][dimension][''] = slices_maxslices_count = len(slices_accesses)results[''][dimension][''] = slices_countcache_requirement_bytes = (slices_sum + slices_max*slices_count)*element_sizeresults[''][dimension][''] = cache_requirement_bytescsim = self.machine.get_cachesim(self._args.cores)results[''][dimension][''] = {}for cl in csim.levels(with_mem=False):cache_equation = sympy.Eq(cache_requirement_bytes, cl.size())if len(self.kernel.constants.keys()) <= :inequality = sympy.solve(sympy.LessThan(cache_requirement_bytes, cl.size()),*self.kernel.constants.keys())else:inequality = sympy.LessThan(cache_requirement_bytes, cl.size())try:eq = sympy.solve(inequality, *self.kernel.constants.keys(), dict=True)except NotImplementedError:eq = Noneresults[''][dimension][''][cl.name] = {'': cl.size(),'': cache_equation,'': inequality,'': eq}return results", "docstring": "Apply layer condition model to calculate cache accesses.", "id": "f4111:c0:m2"} {"signature": "def analyze(self):", "body": "loop_stack = list(self.kernel.get_loop_stack())if any([l[''] != for l in loop_stack]):raise ValueError(\"\"\"\")for aref in list(self.kernel.index_order()):while aref and len(aref[]) == :aref.pop()for i, idx_names in enumerate(aref):if i >= len(loop_stack) orany([loop_stack[i][''] != idx.name for idx in idx_names]):raise ValueError(\"\"\"\"\"\")for arefs in chain(chain(*self.kernel.sources.values()),chain(*self.kernel.destinations.values())):if not arefs:continuewhile arefs and not arefs[].free_symbols:arefs = arefs[:]for i, expr in enumerate(arefs):diff = sympy.diff(expr, sympy.Symbol(loop_stack[i]['']))if diff != and diff != :raise ValueError(\"\"\"\")self.results = self.calculate_cache_access()", "docstring": "Run complete analysis.", "id": "f4111:c0:m3"} {"signature": "def __init__(self, kernel, machine, args=None, parser=None):", "body": "self.kernel = kernelself.machine = machineself._args = argsself._parser = parserself.results = Noneif args:pass", "docstring": "Create layer condition model from kernel and machine objects.\n\n*kernel* is a Kernel object\n*machine* describes the machine (cpu, cache and memory) characteristics\n*args* (optional) are the parsed arguments from the command line", "id": "f4111:c0:m1"} {"signature": "@classmethoddef configure_arggroup(cls, parser):", "body": "pass", "docstring": "Configure argument parser", "id": "f4111:c0:m0"} {"signature": "def report(self, output_file=sys.stdout):", "body": "max_perf = self.results['']if self._args and self._args.verbose >= :print(''.format(pformat(self.results)), file=output_file)if self._args and self._args.verbose >= :print(''.format(pformat(self.results[''])), file=output_file)print('', file=output_file)print('',file=output_file)print('',file=output_file)print(''.format(max_perf[self._args.unit]),file=output_file)for b in self.results['']:print(''''.format(b[''][self._args.unit], **b),file=output_file)print('', file=output_file)if self.results[''][''] > 
max_perf['']:print(''.format(max_perf), file=output_file)else:print('', file=output_file)bottleneck = self.results[''][self.results['']]print(''.format(bottleneck[''][self._args.unit],bottleneck[''],bottleneck['']),file=output_file)print(''.format(bottleneck['']),file=output_file)", "docstring": "Report analysis outcome in human readable form.", "id": "f4112:c0:m5"} {"signature": "@classmethoddef configure_arggroup(cls, parser):", "body": "pass", "docstring": "Configure argument parser.", "id": "f4112:c0:m0"} {"signature": "def analyze(self):", "body": "self.results = self.calculate_cache_access()try:iaca_analysis, asm_block = self.kernel.iaca_analysis(micro_architecture=self.machine[''],asm_block=self.asm_block,pointer_increment=self.pointer_increment,verbose=self.verbose > )except RuntimeError as e:print(\"\" + str(e))sys.exit()block_throughput = iaca_analysis['']uops = iaca_analysis['']iaca_output = iaca_analysis['']port_cycles = iaca_analysis['']elements_per_block = abs(asm_block['']/ self.kernel.datatypes_size[self.kernel.datatype])block_size = elements_per_block*self.kernel.datatypes_size[self.kernel.datatype]try:block_to_cl_ratio = float(self.machine[''])/block_sizeexcept ZeroDivisionError as e:print(\"\", e, file=sys.stderr)sys.exit()port_cycles = dict([(i[], i[]*block_to_cl_ratio) for i in list(port_cycles.items())])uops = uops*block_to_cl_ratiocl_throughput = block_throughput*block_to_cl_ratioflops_per_element = sum(self.kernel._flops.values())self.results[''][] = Noneself.results[''] = self.conv_perf(PrefixedUnit(float(''), ''))self.results[''] = Nonefor level, bottleneck in enumerate(self.results['']):if level == :continueif bottleneck[''][''] < self.results['']['']:self.results[''] = levelself.results[''] = bottleneck['']self.results.update({'': {'': port_cycles,'': cl_throughput,'': uops,'': self.conv_perf(PrefixedUnit(self.machine['']/block_throughput*elements_per_block*flops_per_element* self.cores, \"\")),'': iaca_output}})", "docstring": "Run complete analysis.", "id": "f4112:c1:m2"} {"signature": "def perfctr(self, cmd, group='', code_markers=True):", "body": "if find_executable('') is None:print(\"\",file=sys.stderr)sys.exit()perf_cmd = ['', '', '', '', group]cpu = ''if self._args.cores > :cpu += ''+str(self._args.cores-)perf_cmd += ['', cpu]perf_cmd.append('')perf_cmd += cmdif self.verbose > :print(''.join(perf_cmd))try:with fix_env_variable('', None):output = subprocess.check_output(perf_cmd).decode('').split('')except subprocess.CalledProcessError as e:print(\"\".format(e), file=sys.stderr)sys.exit()results = {}for line in output:line = line.split('')try:results[line[]] = float(line[])except ValueError:passexcept IndexError:continuetry:if line[] == '' or line[] == '':counter_value = else:counter_value = int(line[])if re.fullmatch(r'', line[]) and re.fullmatch(r'', line[]):results.setdefault(line[], {})results[line[]][line[]] = counter_valueexcept (IndexError, ValueError):passreturn results", "docstring": "Run *cmd* with likwid-perfctr and returns result as dict.\n\n*group* may be a performance group known to likwid-perfctr or an event string.\n\nif CLI argument cores > 1, running with multi-core, otherwise single-core", "id": "f4113:c1:m2"} {"signature": "def report(self, output_file=sys.stdout):", "body": "if self.verbose > :with pprint_nosort():pprint.pprint(self.results)if self.verbose > :print(''.format(self.results['']),file=output_file)if self.verbose > 
:print(''.format(self.results['']),file=output_file)print(''.format(self.results['']),file=output_file)print(''.format(self.results['']),file=output_file)print(''.format(self.results['']),file=output_file)print(''.format(self.results['']),file=output_file)print(''.format(self.results['']),file=output_file)if self.verbose > :print(''.format(self.results['']),file=output_file)print('', file=output_file)if not self.no_phenoecm:print(\"\")print(\"\".format(\"\"), end='')for metrics in self.results[''].values():for metric_name in sorted(metrics):print(\"\".format(metric_name), end='')print()breakfor cache, metrics in sorted(self.results[''].items()):print(\"\".format(cache), end='')for k, v in sorted(metrics.items()):print(\"\".format(v), end='')print()print()print(''''.format(**{k: float(v) for k, v in self.results[''].items()}),file=output_file)print('''''',file=output_file)", "docstring": "Report gathered analysis data in human readable form.", "id": "f4113:c1:m4"} {"signature": "def group_iterator(group):", "body": "ordered_chars = string.ascii_letters + string.digitstokenizer = ('''')for m in re.finditer(tokenizer, group):if m.group(''):start, sep, end = m.group('')for i in range(ordered_chars.index(start), ordered_chars.index(end) + ):yield ordered_chars[i]else:yield m.group('')", "docstring": "Iterate over simple regex-like groups.\n\nThe only special character is a dash (-), which take the preceding and the following chars to\ncompute a range. If the range is non-sensical (e.g., b-a) it will be empty\n\nExample:\n>>> list(group_iterator('a-f'))\n['a', 'b', 'c', 'd', 'e', 'f']\n>>> list(group_iterator('148'))\n['1', '4', '8']\n>>> list(group_iterator('7-9ab'))\n['7', '8', '9', 'a', 'b']\n>>> list(group_iterator('0B-A1'))\n['0', '1']", "id": "f4113:m2"} {"signature": "def get_infos(self):", "body": "first_dim_factor = self.first_dim_factorinfos = {'': [], '': self.stats,'': first_dim_factor}for cache_level, cache_info in list(enumerate(self.machine[''])):infos[''].append({'': len(infos['']),'': ''.format(cache_info['']),'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': self.stats[cache_level]['']/first_dim_factor,'': None})return infos", "docstring": "Return verbose information about the predictor.", "id": "f4114:c2:m8"} {"signature": "def get_misses(self):", "body": "return [c[''] for c in self.results['']]+[]", "docstring": "Return a list with number of missed cache lines per memory hierarchy level.", "id": "f4114:c1:m3"} {"signature": "def get_evicts(self):", "body": "return [c[''] for c in self.results['']]+[]", "docstring": "Return a list with number of evicted cache lines per memory hierarchy level.", "id": "f4114:c1:m5"} {"signature": "def get_evicts(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Return a list with number of evicted cache lines per memory hierarchy level.", "id": "f4114:c0:m5"} {"signature": "def get_evicts(self):", "body": "return [self.stats[cache_level]['']/self.first_dim_factorfor cache_level in range(len(self.machine['']))]", "docstring": "Return a list with number of evicted cache lines per memory hierarchy level.", "id": "f4114:c2:m7"} {"signature": 
"def __init__(self, kernel, machine, cores=):", "body": "CachePredictor.__init__(self, kernel, machine, cores=cores)loop_stack = list(self.kernel.get_loop_stack())if any([l[''] != for l in loop_stack]):raise ValueError(\"\"\"\")index_order = [symbol_pos_int(l['']) for l in loop_stack]for var_name, arefs in chain(self.kernel.sources.items(), self.kernel.destinations.items()):if next(iter(arefs)) is None:continuefor a in [self.kernel.access_to_sympy(var_name, a) for a in arefs]:for t in a.expand().as_ordered_terms():idx = t.free_symbols.intersection(index_order)if not idx:continueif len(idx) != :raise ValueError(\"\"\"\".format(t))else: idx = idx.pop()pow_dict = {k: v for k, v in t.as_powers_dict().items()if k != idx}stride_dim = sum(pow_dict.values())error = Falsetry:if loop_stack[-stride_dim-][''] != idx.name:error = Trueexcept IndexError:error = Trueif error:raise ValueError(\"\"\"\"\"\".format(t))inner_index = symbol_pos_int(loop_stack[-][''])inner_increment = loop_stack[-]['']for arefs in chain(chain(*self.kernel.sources.values()),chain(*self.kernel.destinations.values())):if arefs is None:continuefor expr in arefs:diff = expr.subs(inner_index, +inner_increment) - expr.subs(inner_index, )if diff != and diff != :raise ValueError(\"\"\"\")element_size = self.kernel.datatypes_size[self.kernel.datatype]accesses = {}destinations = set()distances = []for var_name in self.kernel.variables:accesses[var_name] = self.kernel.sources.get(var_name, set()).union(self.kernel.destinations.get(var_name, set()))if not any(accesses[var_name]) or not any([a == inner_index or a.coeff(inner_index) != for a in chain.from_iterable(accesses[var_name])]):continuedestinations.update([(var_name, tuple(r)) for r in self.kernel.destinations.get(var_name, [])])acs = accesses[var_name]acs = [self.kernel.access_to_sympy(var_name, r) for r in acs]acs = [self.kernel.subs_consts(e) for e in acs]acs.sort(reverse=True)distances += [(acs[i-]-acs[i]).simplify() for i in range(, len(acs))]distances.append(sympy.oo)distances.sort(reverse=True)distances_bytes = [d*element_size for d in distances]results = {'': {k: list(v) for k,v in accesses.items()},'': distances,'': destinations,'': distances_bytes,'': []}sum_array_sizes = sum(self.kernel.array_sizes(in_bytes=True, subs_consts=True).values())for c in self.machine.get_cachesim(self.cores).levels(with_mem=False):hits = misses = len(distances_bytes)cache_requirement = tail = if c.size() > sum_array_sizes:hits = missesmisses = cache_requirement = sum_array_sizeselse:for tail in sorted(set(distances_bytes), reverse=True):if tail is sympy.oo:continuecache_requirement = (sum([d for d in distances_bytes if d <= tail]) +tail*len([d for d in distances_bytes if d > tail]))if cache_requirement <= c.size():hits = len([d for d in distances_bytes if d <= tail])misses = len([d for d in distances_bytes if d > tail])breakresults[''].append({'': c.name,'': hits,'': misses,'': len(destinations) if c.size() < sum_array_sizes else ,'': cache_requirement,'': tail})self.results = results", "docstring": "Initialize layer condition based predictor from kernel and machine object.", "id": "f4114:c1:m0"} {"signature": "def good_prefix(self, max_error=, round_length=, min_prefix='', max_prefix=None):", "body": "good_prefix = min_prefixbase_value = self.base_value()for k, v in list(self.PREFIXES.items()):if max_prefix is not None and v > self.PREFIXES[max_prefix]:continueif abs(round(base_value/v, round_length)*v - base_value) > base_value*max_error:continueif abs(round(base_value/v, round_length)) < 
:continueif v < self.PREFIXES[good_prefix]:continuegood_prefix = kreturn good_prefix", "docstring": "returns the largest prefix where the relative error is below *max_error* although rounded\nby *round_length*\n\nif *max_prefix* is found in PrefixedUnit.PREFIXES, returned value will not exceed this\nprefix.\nif *min_prefix* is given, returned value will at least be of that prefix (no matter the\nerror)", "id": "f4115:c0:m5"} {"signature": "def base_value(self):", "body": "return self.value*self.PREFIXES[self.prefix]", "docstring": "gives value without prefix", "id": "f4115:c0:m3"} {"signature": "def print_variables_info(self, output_file=sys.stdout):", "body": "table = ('' +'')for name, var_info in list(self.variables.items()):table += ''.format(name, var_info[], var_info[])print(prefix_indent('', table), file=output_file)", "docstring": "Print variables information in human readable format.", "id": "f4116:c0:m22"} {"signature": "def get_index_type(self, loop_nest=None):", "body": "if loop_nest is None:loop_nest = self.get_kernel_loop_nest()if type(loop_nest) is c_ast.For:loop_nest = [loop_nest]index_types = (None, None)for s in loop_nest:if type(s) is c_ast.For:if type(s.stmt) in [c_ast.For, c_ast.Compound]:other = self.get_index_type(loop_nest=s.stmt)else:other = Noneindex_types = (s.init.decls[].type.type.names, other)breakif index_types[] == index_types[] or index_types[] is None:return index_types[]else:raise ValueError(\"\".format(index_types))", "docstring": "Return index type used in loop nest.\n\nIf index types between loops differ, an exception is raised.", "id": "f4116:c1:m13"} {"signature": "def _remove_duplicate_accesses(self):", "body": "self.destinations = {var_name: set(acs) for var_name, acs in self.destinations.items()}self.sources = {var_name: set(acs) for var_name, acs in self.sources.items()}", "docstring": "Remove duplicate source and destination accesses", "id": "f4116:c0:m8"} {"signature": "def get_kernel_loop_nest(self):", "body": "loop_nest = [s for s in self.kernel_ast.block_itemsif type(s) in [c_ast.For, c_ast.Pragma, c_ast.FuncCall]]assert len(loop_nest) >= , \"\"return loop_nest", "docstring": "Return kernel loop nest including any preceding pragmas and following swaps.", "id": "f4116:c1:m16"} {"signature": "def build_executable(self, *args, **kwargs):", "body": "raise NotImplementedError(\"\"\"\")", "docstring": "Compile and build binary.", "id": "f4116:c0:m25"} {"signature": "def get_kernel_code(self, openmp=False, as_filename=False, name=''):", "body": "assert self.kernel_ast is not None, \"\"\"\"file_name = ''if openmp:file_name += ''file_name += ''fp, already_available = self._get_intermediate_file(file_name, machine_and_compiler_dependent=False)if already_available:code = fp.read()else:array_declarations, array_dimensions = self._build_array_declarations()if openmp:kernel = deepcopy(self.get_kernel_loop_nest())for aref in find_node_type(kernel, c_ast.ArrayRef):transform_multidim_to_1d_ref(aref, array_dimensions)omp_pragmas = [p for p in find_node_type(kernel, c_ast.Pragma)if '' in p.string]if not omp_pragmas:kernel.insert(, c_ast.Pragma(\"\"))else:kernel = deepcopy(self.get_kernel_loop_nest())for aref in find_node_type(kernel, c_ast.ArrayRef):transform_multidim_to_1d_ref(aref, array_dimensions)function_ast = c_ast.FuncDef(decl=c_ast.Decl(name=name, type=self._build_kernel_function_declaration(name=name), quals=[],storage=[], funcspec=[], init=None, bitsize=None),body=c_ast.Compound(block_items=kernel),param_decls=None)code = 
CGenerator().visit(function_ast)code = '' + codefp.write(code)fp.close()if as_filename:return fp.nameelse:return code", "docstring": "Generate and return compilable source code with kernel function from AST.\n\n:param openmp: if true, OpenMP code will be generated\n:param as_filename: if true, will save to file and return filename\n:param name: name of kernel function", "id": "f4116:c1:m23"} {"signature": "def access_to_sympy(self, var_name, access):", "body": "base_sizes = self.variables[var_name][]expr = sympy.Number()for dimension, a in enumerate(access):base_size = reduce(operator.mul, base_sizes[dimension+:], sympy.Integer())expr += base_size*areturn expr", "docstring": "Transform a (multidimensional) variable access to a flattend sympy expression.\n\nAlso works with flat array accesses.", "id": "f4116:c0:m9"} {"signature": "@lru_cache()def subs_consts(self, expr):", "body": "if isinstance(expr, numbers.Number):return exprelse:return expr.subs(self.constants)", "docstring": "Substitute constants in expression unless it is already a number.", "id": "f4116:c0:m5"} {"signature": "def transform_multidim_to_1d_ref(aref, dimension_dict):", "body": "dims = []name = arefwhile type(name) is c_ast.ArrayRef:dims.append(name.subscript)name = name.namesubscript_list = []for i, d in enumerate(dims):if i == :subscript_list.append(d)else:subscript_list.append(c_ast.BinaryOp('', d, reduce(lambda l, r: c_ast.BinaryOp('', l, r),dimension_dict[name.name][-:-i-:-])))aref.subscript = reduce(lambda l, r: c_ast.BinaryOp('', l, r), subscript_list)aref.name = name", "docstring": "Transform ast of multidimensional reference to a single dimension reference.\n\nIn-place operation!", "id": "f4116:m3"} {"signature": "def prefix_indent(prefix, textblock, later_prefix=''):", "body": "textblock = textblock.split('')line = prefix + textblock[] + ''if len(later_prefix) == :later_prefix = ''*len(prefix)line = line + ''.join([later_prefix + x for x in textblock[:]])if line[-] != '':return line + ''else:return line", "docstring": "Prefix and indent all lines in *textblock*.\n\n*prefix* is a prefix string\n*later_prefix* is used on all but the first line, if it is a single character\n it will be repeated to match length of *prefix*", "id": "f4116:m1"} {"signature": "def iaca_analysis(self, micro_architecture, asm_block='',pointer_increment='', verbose=False):", "body": "asm_filename = self.compile_kernel(assembly=True, verbose=verbose)asm_marked_filename = os.path.splitext(asm_filename)[]+''with open(asm_filename, '') as in_file, open(asm_marked_filename, '') as out_file:self.asm_block = iaca.iaca_instrumentation(in_file, out_file,block_selection=asm_block,pointer_increment=pointer_increment)obj_name = self.assemble_to_object(asm_marked_filename, verbose=verbose)return iaca.iaca_analyse_instrumented_binary(obj_name, micro_architecture), self.asm_block", "docstring": "Run an IACA analysis and return its outcome.\n\n*asm_block* controls how the to-be-marked block is chosen. 
\"auto\" (default) results in\nthe largest block, \"manual\" results in interactive and a number in the according block.\n\n*pointer_increment* is the number of bytes the pointer is incremented after the loop or\n - 'auto': automatic detection, RuntimeError is raised in case of failure\n - 'auto_with_manual_fallback': automatic detection, fallback to manual input\n - 'manual': prompt user", "id": "f4116:c1:m28"} {"signature": "def _build_const_declartions(self, with_init=True):", "body": "decls = []index_type = self.get_index_type()i = for k in self.constants:type_decl = c_ast.TypeDecl(k.name, [''], c_ast.IdentifierType(index_type))init = Noneif with_init:init = c_ast.FuncCall(c_ast.ID(''),c_ast.ExprList([c_ast.ArrayRef(c_ast.ID(''),c_ast.Constant('', str(i)))]))i += decls.append(c_ast.Decl(k.name, [''], [], [],type_decl, init, None))return decls", "docstring": "Generate constants declarations\n\n:return: list of declarations", "id": "f4116:c1:m14"} {"signature": "def _build_scalar_declarations(self, with_init=True):", "body": "scalar_declarations = [deepcopy(d) for d in self.kernel_ast.block_itemsif type(d) is c_ast.Decl and type(d.type) is c_ast.TypeDecl]if with_init:random.seed() for d in scalar_declarations:if d.type.type.names[] in ['', '']:d.init = c_ast.Constant('', str(random.uniform(, )))elif d.type.type.names[] in ['', '', '','', '', '']:d.init = c_ast.Constant('', )return scalar_declarations", "docstring": "Build and return scalar variable declarations", "id": "f4116:c1:m22"} {"signature": "def assemble_to_object(self, in_filename, verbose=False):", "body": "file_base_name = os.path.splitext(os.path.basename(in_filename))[]out_filename, already_exists = self._get_intermediate_file(file_base_name + '',binary=True,fp=False)if already_exists:passcompiler, compiler_args = self._machine.get_compiler()compiler_args.append('')cmd = [compiler] + [in_filename] +compiler_args + ['', out_filename]if verbose:print('', ''.join(cmd))try:subprocess.check_output(cmd)except subprocess.CalledProcessError as e:print(\"\", e, file=sys.stderr)sys.exit()return out_filename", "docstring": "Assemble *in_filename* assembly into *out_filename* object.\n\nIf *iaca_marked* is set to true, markers are inserted around the block with most packed\ninstructions or (if no packed instr. were found) the largest block and modified file is\nsaved to *in_file*.\n\n*asm_block* controls how the to-be-marked block is chosen. 
\"auto\" (default) results in\nthe largest block, \"manual\" results in interactive and a number in the according block.\n\n*pointer_increment* is the number of bytes the pointer is incremented after the loop or\n - 'auto': automatic detection, RuntimeError is raised in case of failure\n - 'auto_with_manual_fallback': automatic detection, fallback to manual input\n - 'manual': prompt user\n\nReturns two-tuple (filepointer, filename) to temp binary file.", "id": "f4116:c1:m26"} {"signature": "def iaca_analysis(self, *args, **kwargs):", "body": "raise NotImplementedError(\"\"\"\")", "docstring": "Run IACA analysis.", "id": "f4116:c0:m24"} {"signature": "def simulate(kernel, model, define_dict, blocking_constant, blocking_length):", "body": "kernel.clear_state()for k, v in define_dict.items():kernel.set_constant(k, v)kernel.set_constant(blocking_constant, blocking_length)model.analyze()return sum([cy for dscr, cy in model.results['']])", "docstring": "Setup and execute model with given blocking length", "id": "f4118:m1"} {"signature": "def measure_bw(type_, total_size, threads_per_core, max_threads_per_core, cores_per_socket,sockets):", "body": "groups = []for s in range(sockets):groups += ['','' + str(s) + '' + str(total_size) + '' +str(threads_per_core * cores_per_socket) +'' + str(int(max_threads_per_core / threads_per_core))]cmd = ['', '', type_] + groupssys.stderr.write(''.join(cmd))output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[].decode('')if not output:print(''.join(cmd) + '''', file=sys.stderr)sys.exit()bw = float(get_match_or_break(r'', output)[])print('', PrefixedUnit(bw, ''), file=sys.stderr)return PrefixedUnit(bw, '')", "docstring": "*size* is given in kilo bytes", "id": "f4120:m4"} {"signature": "def select_best_block(blocks):", "body": "if not blocks:raise ValueError(\"\")best_block = max(blocks, key=lambda b: b[][''])if best_block[][''] == :best_block = max(blocks,key=lambda b: (b[][''] + b[][''] + b[][''],b[][''], b[][''], b[]['']))return best_block[]", "docstring": "Return best block selected based on simple heuristic.", "id": "f4122:m4"} {"signature": "@staticmethoddef parse_perfctr_event(perfctr):", "body": "split_perfctr = perfctr.split('')assert len(split_perfctr) >= , \"\"event_tuple = split_perfctr[:]parameters = {}for p in split_perfctr[:]:if '' in p:k, v = p.split('')if v.startswith(''):parameters[k] = int(v, )else:parameters[k] = int(v)else:parameters[p] = Noneevent_tuple.append(parameters)return tuple(event_tuple)", "docstring": "Parse events in machine description to tuple representation used in Benchmark module.\n\nExamples:\n>>> parse_perfctr_event('PERF_EVENT:REG[0-3]')\n('PERF_EVENT', 'REG[0-3]')\n>>> parse_perfctr_event('PERF_EVENT:REG[0-3]:STAY:FOO=23:BAR=0x23')\n('PERF_EVENT', 'REG[0-3]', {'STAY': None, 'FOO': 23, 'BAR': 35})", "id": "f4123:c0:m8"} {"signature": "def get_compiler(self, compiler=None, flags=None):", "body": "if self._args:compiler = compiler or self._args.compilerflags = flags or self._args.compiler_flagsif compiler is None:for c in self[''].keys():if find_executable(c) is not None:compiler = cbreakelse:raise RuntimeError(\"\"\"\"\"\".format(list(self[''].keys())))if flags is None:flags = self[''].get(compiler, '')return compiler, flags.split('')", "docstring": "Return tuple of compiler and compiler flags.\n\nSelects compiler and flags from machine description file, commandline arguments or call\narguements.", "id": "f4123:c0:m7"} {"signature": "def sanitize_symbolname(name):", "body": "return re.subn('', '', name)[]", 
"docstring": "Sanitize all characters not matched to a symbol by sympy's parse_expr.\n\nBased on same rules as used for python variables.", "id": "f4123:m0"} {"signature": "def __repr__(self):", "body": "return ''.format(self.__class__.__name__,repr(self._path or self._data['']),)", "docstring": "Return object representation.", "id": "f4123:c0:m2"} {"signature": "@staticmethoddef parse_perfmetric(metric):", "body": "perfcounters = re.findall(r'', metric)temp_metric = metrictemp_pc_names = {\"\".format(re.sub(\"\", \"\", pc)): pcfor i, pc in enumerate(perfcounters)}for var_name, pc in temp_pc_names.items():temp_metric = temp_metric.replace(pc, var_name)expr = parse_expr(temp_metric)for s in expr.free_symbols:if s.name in temp_pc_names:s.name = temp_pc_names[str(s)]events = {s: MachineModel.parse_perfctr_event(s.name) for s in expr.free_symbolsif s.name in perfcounters}return expr, events", "docstring": "Return (sympy expressions, event names and symbols dict) from performance metric str.", "id": "f4123:c0:m9"} {"signature": "def __and__(self, other):", "body": "return Intervals(*(self.data+other.data))", "docstring": "Combine two intervals, under the assumption that they are sane.", "id": "f4125:c0:m3"} {"signature": "def __len__(self):", "body": "return int(sum(upper-lower for (lower, upper) in self.data))", "docstring": "Return sum of range lengths.", "id": "f4125:c0:m4"} {"signature": "def build_minimal_runs(events):", "body": "events = [e for i, e in enumerate(events) if events.index(e) == i]scheduled_runs = {}scheduled_events = []cur_run = while len(scheduled_events) != len(events):for event_tpl in events:event, registers, parameters = event_tplif event_tpl in scheduled_events:continuefor possible_reg in register_options(registers):s = scheduled_runs.setdefault(cur_run, {})if possible_reg not in s:s[possible_reg] = (event, possible_reg, parameters)scheduled_events.append(event_tpl)breakcur_run += runs = [list(v.values()) for v in scheduled_runs.values()]return runs", "docstring": "Compile list of minimal runs for given events.", "id": "f4127:m3"} {"signature": "def receiver_blueprint_for(self, name):", "body": "provider = self.get_provider(name)bp = provider.make_receiver_blueprint()from flask.globals import g @bp.before_requestdef init_g():g.provider = providerreturn bp", "docstring": "Get a Flask blueprint for the named provider that handles incoming messages & status reports\n\n Note: this requires Flask microframework.\n\n :rtype: flask.blueprints.Blueprint\n :returns: Flask Blueprint, fully functional\n :raises KeyError: provider not found\n :raises NotImplementedError: Provider does not implement a receiver", "id": "f4134:c0:m7"} {"signature": "@propertydef default_provider(self):", "body": "return self._default_provider", "docstring": "Default provider name\n\n :rtype: str | None", "id": "f4134:c0:m2"} {"signature": "def add_provider(self, name, Provider, **config):", "body": "assert issubclass(Provider, IProvider), ''assert isinstance(name, str), ''provider = Provider(self, name, **config)assert name not in self._providers, ''self._providers[name] = providerif self.default_provider is None:self.default_provider = namereturn provider", "docstring": "Register a provider on the gateway\n\n The first provider defined becomes the default one: used in case the routing function has no better idea.\n\n :type name: str\n :param name: Provider name that will be used to uniquely identify it\n :type Provider: type\n :param Provider: Provider class that inherits from 
`smsframework.IProvider`\n :param config: Provider configuration. Please refer to the Provider documentation.\n :rtype: IProvider\n :returns: The created provider", "id": "f4134:c0:m1"} {"signature": "def send(self, message):", "body": "raise NotImplementedError('')", "docstring": "Send a message\n\n Providers are required to:\n * Populate `message.msgid` and `message.meta` on completion\n * Expect that `message.src` can be empty\n * Support both ASCII and Unicode messages\n * Use `message.params` for provider-dependent configuration\n * Raise exceptions from `exc.py` for errors\n\n :type message: data.OutgoingMessage\n :param message: The message to send\n :rtype: OutgoingMessage\n :returns: The sent message with populated fields\n :raises MessageSendError: sending errors", "id": "f4139:c0:m1"} {"signature": "def _receive_status(self, status):", "body": "status.provider = self.nameself.gateway.onStatus(status)return status", "docstring": "Incoming status callback\n\n Calls Gateway.onStatus event hook\n\n Providers are required to:\n * Cast phone numbers to digits-only\n * Use proper MessageStatus subclasses\n * Populate `status.msgid` and `status.meta` fields\n * If this method fails with an exception, the provider is required to respond with an error to the service\n\n :type status: MessageStatus\n :param status: The received status\n :rtype: MessageStatus", "id": "f4139:c0:m4"} {"signature": "def make_receiver_blueprint(self):", "body": "raise NotImplementedError('')", "docstring": "Get a Blueprint for the HTTP receiver\n\n :rtype: flask.Blueprint\n :returns: configured Flask Blueprint receiver\n :raises NotImplementedError: Provider does not support message reception", "id": "f4139:c0:m2"} {"signature": "@bp.route('', methods=[''])@jsonex_apidef status():", "body": "req = jsonex_loads(request.get_data())status = g.provider._receive_status(req[''])return {'': status}", "docstring": "Incoming status handler: forwarded by ForwardServerProvider", "id": "f4140:m1"} {"signature": "def make_receiver_blueprint(self):", "body": "from .receiver_client import bpreturn bp", "docstring": "Create the receiver so server can send messages to us\n :rtype: flask.Blueprint", "id": "f4141:c0:m2"} {"signature": "def forward(self, obj):", "body": "assert isinstance(obj, (IncomingMessage, MessageStatus)), ''.format(obj)clients = self.choose_clients(obj)if Parallel:pll = Parallel(self._forward_object_to_client)for client in clients:pll(client, obj)results, errors = pll.join()if errors:raise errors[]else:for client in clients:self._forward_object_to_client(client, obj)", "docstring": "Forward an object to clients.\n\n :param obj: The object to be forwarded\n :type obj: smsframework.data.IncomingMessage|smsframework.data.MessageStatus\n :raises Exception: if any of the clients failed", "id": "f4141:c1:m3"} {"signature": "def jsonex_request(url, data, headers=None):", "body": "url, headers = _parse_authentication(url)headers[''] = ''try:req = Request(url, headers=headers)response = urlopen(req, jsonex_dumps(data))res_str = response.read()res = jsonex_loads(res_str)except HTTPError as e:if '' in e.headers and e.headers[''] == '':res = jsonex_loads(e.read())else:raise exc.ServerError(''.format(url, e))except URLError as e:raise exc.ConnectionError(''.format(url, e))if '' in res: raise res[''] return res", "docstring": "Make a request with JsonEx\n :param url: URL\n :type url: str\n :param data: Data to POST\n :type data: dict\n :return: Response\n :rtype: dict\n :raises exc.ConnectionError: Connection error\n 
:raises exc.ServerError: Remote server error (unknown)\n :raises exc.ProviderError: any errors reported by the remote", "id": "f4141:m4"} {"signature": "def choose_clients(self, obj):", "body": "return self.clients", "docstring": "Given a message, decides which clients will receive it.\n\n Override to have custom routing. Default: send to all clients\n\n :param obj: The object to be forwarded\n :type obj: smsframework.data.IncomingMessage|smsframework.data.MessageStatus\n :return: List of client URLs to forward the message to\n :rtype: list[str]", "id": "f4141:c1:m1"} {"signature": "def jsonex_api(f):", "body": "@wraps(f)def wrapper(*args, **kwargs):try:code, res = , f(*args, **kwargs)except HTTPException as e:code, res = e.code, {'': e}except Exception as e:code, res = , {'': e}logger.exception('')response = make_response(jsonex_dumps(res), code)response.headers[''] = ''return responsereturn wrapper", "docstring": "View wrapper for JsonEx responses. Catches exceptions as well", "id": "f4141:m2"} {"signature": "def _parse_authentication(url):", "body": "u = urlh = {} if url in _parse_authentication._memoize:u, h = _parse_authentication._memoize[url]else:p = urlsplit(url, '')if p.username and p.password:h[''] = b'' + base64.b64encode(p.username.encode() + b'' + p.password.encode())u = urlunsplit((p.scheme, p.netloc.split('', )[], p.path, p.query, p.fragment))_parse_authentication._memoize[url] = (u, h)return u, h", "docstring": "Parse authentication data from the URL and put it in the `headers` dict. With caching behavior\n :param url: URL\n :type url: str\n :return: (URL without authentication info, headers dict)\n :rtype: str, dict", "id": "f4141:m3"} {"signature": "def __init__(self, gateway, name, server_url):", "body": "self.server_url = server_url.rstrip('') + '' super(ForwardClientProvider, self).__init__(gateway, name)", "docstring": "Init the forwarding client\n :param server_url: Server URL.\n The URL should point to ForwardServerProvider registered on the server\n :type server_url: str", "id": "f4141:c0:m0"} {"signature": "def received(self, src, body):", "body": "self._msgid += message = IncomingMessage(src, body, self._msgid)self._traffic.append(message)self._receive_message(message)return message", "docstring": "Simulate an incoming message\n\n :type src: str\n :param src: Message source\n :type boby: str | unicode\n :param body: Message body\n :rtype: IncomingMessage", "id": "f4147:c0:m2"} {"signature": "def subscribe(self, number, callback):", "body": "self._subscribers[digits_only(number)] = callbackreturn self", "docstring": "Register a virtual subscriber which receives messages to the matching number.\n\n :type number: str\n :param number: Subscriber phone number\n :type callback: callable\n :param callback: A callback(OutgoingMessage) which handles the messages directed to the subscriber.\n The message object is augmented with the .reply(str) method which allows to send a reply easily!\n :rtype: LoopbackProvider", "id": "f4147:c0:m3"} {"signature": "def multiply(traj):", "body": "z = traj.x * traj.ytraj.f_add_result('', z, comment='')", "docstring": "Example of a sophisticated numerical experiment\n that involves multiplying two integer values.\n\n :param traj:\n Trajectory containing the parameters in a particular\n combination, it also serves as a container for results.", "id": "f4154:m0"} {"signature": "def no_op_wraps(func):", "body": "def wrapper(decorator):return funcreturn wrapper", "docstring": "Replaces functools.wraps in order to undo wrapping.\n\n Can be used 
to preserve the decorated function's signature\n in the documentation generated by Sphinx.", "id": "f4155:m0"} {"signature": "@staticmethoddef _apply_fast_access(data, fast_access):", "body": "if fast_access and data.f_supports_fast_access():return data.f_get()else:return data", "docstring": "Method that checks if fast access is possible and applies it if desired", "id": "f4156:c6:m26"} {"signature": "@propertydef v_full_name(self):", "body": "return self._full_name", "docstring": "The full name, relative to the root node.\n\n The full name of a trajectory or single run is the empty string since it is root.", "id": "f4156:c3:m10"} {"signature": "def _remove_node_or_leaf(self, instance, recursive=False):", "body": "full_name = instance.v_full_namesplit_name = deque(full_name.split(''))self._remove_along_branch(self._root_instance, split_name, recursive)", "docstring": "Removes a single node from the tree.\n\n Only from RAM not from hdf5 file!\n\n :param instance: The node to be deleted\n\n :param recursive: If group nodes with children should be deleted", "id": "f4156:c6:m10"} {"signature": "@propertydef vars(self):", "body": "if self._vars is None:self._vars = NNTreeNodeVars(self)return self._vars", "docstring": "Alternative naming, you can use `node.vars.name` instead of `node.v_name`", "id": "f4156:c3:m1"} {"signature": "def _create_any_param_or_result(self, parent_node, name, type_name, instance, constructor,args, kwargs):", "body": "root = self._root_instancefull_name = self._make_full_name(parent_node.v_full_name, name)if instance is None:if constructor is None:if type_name == RESULT:constructor = root._standard_resultelif type_name in [PARAMETER, CONFIG, DERIVED_PARAMETER]:constructor = root._standard_parameterelse:constructor = root._standard_leafinstance = root._construct_instance(constructor, full_name, *args, **kwargs)else:instance._rename(full_name)self._set_details_tree_node(parent_node, name, instance)where_dict = self._map_type_to_dict(type_name)full_name = instance._full_nameif full_name in where_dict:raise AttributeError(full_name + '')if type_name != RESULT and full_name in root._changed_default_parameters:self._logger.info('' %full_name)change_args, change_kwargs = root._changed_default_parameters.pop(full_name)instance.f_set(*change_args, **change_kwargs)where_dict[full_name] = instanceself._add_to_nodes_and_leaves(instance)parent_node._children[name] = instanceparent_node._leaves[name] = instanceif full_name in self._root_instance._explored_parameters:instance._explored = True self._root_instance._explored_parameters[full_name] = instanceself._logger.debug('' % full_name)return instance", "docstring": "Generically creates a novel parameter or result instance inferring from the `type_name`.\n\n If the instance is already supplied it is NOT constructed new.\n\n :param parent_node:\n\n Parent trajectory node\n\n :param name:\n\n Name of the new result or parameter. Here the name no longer contains colons.\n\n :param type_name:\n\n Whether it is a parameter below parameters, config, derived parameters or whether\n it is a result.\n\n :param instance:\n\n The instance if it has been constructed somewhere else, otherwise None.\n\n :param constructor:\n\n A constructor used if instance needs to be constructed. 
If None the current standard\n constructor is chosen.\n\n :param args:\n\n Additional arguments passed to the constructor\n\n :param kwargs:\n\n Additional keyword arguments passed to the constructor\n\n :return: The new instance", "id": "f4156:c6:m23"} {"signature": "def _create_link(self, act_node, name, instance):", "body": "act_node._links[name] = instanceact_node._children[name] = instancefull_name = instance.v_full_nameif full_name not in self._root_instance._linked_by:self._root_instance._linked_by[full_name] = {}linking = self._root_instance._linked_by[full_name]if act_node.v_full_name not in linking:linking[act_node.v_full_name] = (act_node, set())linking[act_node.v_full_name][].add(name)if name not in self._links_count:self._links_count[name] = self._links_count[name] = self._links_count[name] + self._logger.debug('''' % (name, act_node.v_full_name,instance.v_full_name))return instance", "docstring": "Creates a link and checks if names are appropriate", "id": "f4156:c6:m20"} {"signature": "def f_add_result_group(self, *args, **kwargs):", "body": "return self._nn_interface._add_generic(self, type_name=RESULT_GROUP,group_type_name=RESULT_GROUP,args=args, kwargs=kwargs)", "docstring": "Adds an empty result group under the current node.\n\n Adds the full name of the current node as prefix to the name of the group.\n If current node is a single run (root) adds the prefix `'results.runs.run_08%d%'` to the\n full name where `'08%d'` is replaced by the index of the current run.\n\n The `name` can also contain subgroups separated via colons, for example:\n `name=subgroup1.subgroup2.subgroup3`. These other parent groups will be automatically\n be created.", "id": "f4156:c9:m0"} {"signature": "def _get_all(self, node, name, max_depth, shortcuts):", "body": "if max_depth is None:max_depth = float('')if isinstance(name, list):split_name = nameelif isinstance(name, tuple):split_name = list(name)elif isinstance(name, int):split_name = [name]else:split_name = name.split('')for idx, key in enumerate(split_name):_, key = self._translate_shortcut(key)_, key = self._replace_wildcards(key)split_name[idx] = keyreturn self._backwards_search(node, split_name, max_depth, shortcuts)", "docstring": "Searches for all occurrences of `name` under `node`.\n\n :param node:\n\n Start node\n\n :param name:\n\n Name what to look for can be longer and separated by colons, i.e.\n `mygroupA.mygroupB.myparam`.\n\n :param max_depth:\n\n Maximum depth to search for relative to start node.\n\n :param shortcuts:\n\n If shortcuts are allowed\n\n :return:\n\n List of nodes that match the name, empty list if nothing was found.", "id": "f4156:c6:m35"} {"signature": "def f_add_leaf(self, *args, **kwargs):", "body": "return self._nn_interface._add_generic(self, type_name=LEAF,group_type_name=GROUP,args=args, kwargs=kwargs,add_prefix=False)", "docstring": "Adds an empty generic leaf under the current node.\n\n You can add to a generic leaves anywhere you want. So you are free to build\n your trajectory tree with any structure. You do not necessarily have to follow the\n four subtrees `config`, `parameters`, `derived_parameters`, `results`.\n\n If you are operating within these subtrees this simply calls the corresponding adding\n function.\n\n Be aware that if you are within a single run and you add items not below a group\n `run_XXXXXXXX` that you have to manually\n save the items. 
Otherwise they will be lost after the single run is completed.", "id": "f4156:c7:m21"} {"signature": "def f_add_result(self, *args, **kwargs):", "body": "return self._nn_interface._add_generic(self, type_name=RESULT,group_type_name=RESULT_GROUP,args=args, kwargs=kwargs)", "docstring": "Adds a result under the current node.\n\n There are two ways to add a new result either by adding a result instance:\n\n >>> new_result = Result('group1.group2.myresult', 1666, x=3, y=4, comment='Example!')\n >>> traj.f_add_result(new_result)\n\n Or by passing the values directly to the function, with the name being the first\n (non-keyword!) argument:\n\n >>> traj.f_add_result('group1.group2.myresult', 1666, x=3, y=3,comment='Example!')\n\n\n If you want to create a different result than the standard result, you can\n give the constructor as the first (non-keyword!) argument followed by the name\n (non-keyword!):\n\n >>> traj.f_add_result(PickleResult,'group1.group2.myresult', 1666, x=3, y=3, comment='Example!')\n\n Additional arguments (here `1666`) or keyword arguments (here `x=3, y=3`) are passed\n onto the constructor of the result.\n\n\n Adds the full name of the current node as prefix to the name of the result.\n If current node is a single run (root) adds the prefix `'results.runs.run_08%d%'` to the\n full name where `'08%d'` is replaced by the index of the current run.", "id": "f4156:c9:m1"} {"signature": "@propertydef v_branch(self):", "body": "return self._branch", "docstring": "The name of the branch/subtree, i.e. the first node below the root.\n\n The empty string in case of root itself.", "id": "f4156:c3:m14"} {"signature": "@staticmethoddef _determine_types(start_node, first_name, add_leaf, add_link):", "body": "if start_node.v_is_root:where = first_nameelse:where = start_node._branchif where in SUBTREE_MAPPING:type_tuple = SUBTREE_MAPPING[where]else:type_tuple = (GROUP, LEAF)if add_link:return type_tuple[], LINKif add_leaf:return type_tupleelse:return type_tuple[], type_tuple[]", "docstring": "Determines types for generic additions", "id": "f4156:c6:m14"} {"signature": "def f_get_all(self, name, max_depth=None, shortcuts=True):", "body": "return self._nn_interface._get_all(self, name, max_depth=max_depth, shortcuts=shortcuts)", "docstring": "Searches for all occurrences of `name` under `node`.\n\n Links are NOT considered since nodes are searched bottom up in the tree.\n\n :param node:\n\n Start node\n\n :param name:\n\n Name of what to look for, can be separated by colons, i.e.\n ``'mygroupA.mygroupB.myparam'``.\n\n :param max_depth:\n\n Maximum search depth relative to start node.\n `None` for no limit.\n\n :param shortcuts:\n\n If shortcuts are allowed, otherwise the stated name defines a\n consecutive name.For instance. 
``'mygroupA.mygroupB.myparam'`` would\n also find ``mygroupA.mygroupX.mygroupB.mygroupY.myparam`` if shortcuts\n are allowed, otherwise not.\n\n :return:\n\n List of nodes that match the name, empty list if nothing was found.", "id": "f4156:c7:m41"} {"signature": "def _very_fast_search(self, node, key, max_depth, with_links, crun):", "body": "if key in self._links_count:returnparent_full_name = node.v_full_namestarting_depth = node.v_depthcandidate_dict = self._get_candidate_dict(key, crun)if with_links:upper_bound = else:upper_bound = FAST_UPPER_BOUNDif len(candidate_dict) > upper_bound:raise pex.TooManyGroupsError('')result_node = Nonefor goal_name in candidate_dict:if goal_name.startswith(parent_full_name):candidate = candidate_dict[goal_name]if candidate.v_depth - starting_depth <= max_depth:if result_node is not None:raise pex.NotUniqueNodeError(''''''% (key, goal_name, result_node.v_full_name))result_node = candidateif result_node is not None:return result_node, result_node.v_depth", "docstring": "Fast search for a node in the tree.\n\n The tree is not traversed but the reference dictionaries are searched.\n\n :param node:\n\n Parent node to start from\n\n :param key:\n\n Name of node to find\n\n :param max_depth:\n\n Maximum depth.\n\n :param with_links:\n\n If we work with links than we can only be sure to found the node in case we\n have a single match. Otherwise the other match might have been linked as well.\n\n :param crun:\n\n If given only nodes belonging to this particular run are searched and the rest\n is blinded out.\n\n :return: The found node and its depth\n\n :raises:\n\n TooManyGroupsError:\n\n If search cannot performed fast enough, an alternative search method is needed.\n\n NotUniqueNodeError:\n\n If several nodes match the key criterion", "id": "f4156:c6:m32"} {"signature": "def f_add_parameter_group(self, *args, **kwargs):", "body": "return self._nn_interface._add_generic(self, type_name=PARAMETER_GROUP,group_type_name=PARAMETER_GROUP,args=args, kwargs=kwargs)", "docstring": "Adds an empty parameter group under the current node.\n\n Can be called with ``f_add_parameter_group('MyName', 'this is an informative comment')``\n or ``f_add_parameter_group(name='MyName', comment='This is an informative comment')``\n or with a given new group instance:\n ``f_add_parameter_group(ParameterGroup('MyName', comment='This is a comment'))``.\n\n Adds the full name of the current node as prefix to the name of the group.\n If current node is the trajectory (root), the prefix `'parameters'`\n is added to the full name.\n\n The `name` can also contain subgroups separated via colons, for example:\n `name=subgroup1.subgroup2.subgroup3`. 
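A doctest-style sketch of the `f_get_all` call described in record f4156:c7:m41 above, reusing the group names from its own docstring and assuming a trajectory `traj` that contains such a parameter:

    >>> nodes = traj.f_get_all('mygroupA.mygroupB.myparam', max_depth=None, shortcuts=True)
    >>> # With shortcuts=True this also matches e.g. mygroupA.mygroupX.mygroupB.mygroupY.myparam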
These other parent groups will be automatically\n created.", "id": "f4156:c8:m0"} {"signature": "def _get(self, node, name, fast_access,shortcuts, max_depth, auto_load, with_links):", "body": "if auto_load and not with_links:raise ValueError('')if isinstance(name, list):split_name = nameelif isinstance(name, tuple):split_name = list(name)elif isinstance(name, int):split_name = [name]else:split_name = name.split('')if node.v_is_root:if len(split_name) == and split_name[] == '':return nodekey = split_name[]_, key = self._translate_shortcut(key)if key in SUBTREE_MAPPING and key not in node._children:node.f_add_group(key)if max_depth is None:max_depth = float('')if len(split_name) > max_depth and shortcuts:raise ValueError('' %(str(name), max_depth))try_auto_load_directly1 = Falsetry_auto_load_directly2 = Falsewildcard_positions = []root = self._root_instancefor idx, key in enumerate(split_name):translated_shortcut, key = self._translate_shortcut(key)if translated_shortcut:split_name[idx] = keyif key[] == '':raise AttributeError('''''' % key)is_wildcard = self._root_instance.f_is_wildcard(key)if (not is_wildcard and key not in self._nodes_and_leaves andkey not in self._links_count):try_auto_load_directly1 = Truetry_auto_load_directly2 = Trueif is_wildcard:wildcard_positions.append((idx, key))if root.f_wildcard(key) not in self._nodes_and_leaves:try_auto_load_directly1 = Trueif root.f_wildcard(key, -) not in self._nodes_and_leaves:try_auto_load_directly2 = Truerun_idx = root.v_idxwildcard_exception = None if try_auto_load_directly1 and try_auto_load_directly2 and not auto_load:for wildcard_pos, wildcard in wildcard_positions:split_name[wildcard_pos] = root.f_wildcard(wildcard, run_idx)raise AttributeError('' %str(''.join(split_name)))if run_idx > -:with self._disable_logging:try:for wildcard_pos, wildcard in wildcard_positions:split_name[wildcard_pos] = root.f_wildcard(wildcard, run_idx)result = self._perform_get(node, split_name, fast_access,shortcuts, max_depth, auto_load, with_links,try_auto_load_directly1)return resultexcept (pex.DataNotInStorageError, AttributeError) as exc:wildcard_exception = excif wildcard_positions:for wildcard_pos, wildcard in wildcard_positions:split_name[wildcard_pos] = root.f_wildcard(wildcard, -)try:return self._perform_get(node, split_name, fast_access,shortcuts, max_depth, auto_load, with_links,try_auto_load_directly2)except (pex.DataNotInStorageError, AttributeError):if wildcard_exception is not None:raise wildcard_exceptionelse:raise", "docstring": "Searches for an item (parameter/result/group node) with the given `name`.\n\n :param node: The node below which the search is performed\n\n :param name: Name of the item (full name or parts of the full name)\n\n :param fast_access: If the result is a parameter, whether fast access should be applied.\n\n :param max_depth:\n\n Maximum search depth relative to start node.\n\n :param auto_load:\n\n If data should be automatically loaded\n\n :param with_links\n\n If links should be considered\n\n :return:\n\n The found instance (result/parameter/group node) or if fast access is True and you\n found a parameter or result that supports fast access, the contained value is returned.\n\n :raises:\n\n AttributeError if no node with the given name can be found.\n Raises errors that are raised by the storage service if `auto_load=True` and\n item cannot be found.", "id": "f4156:c6:m37"} {"signature": "def _add_group_from_storage(self, args, kwargs):", "body": "return 
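A doctest-style sketch of the calling conventions listed in the `f_add_parameter_group` docstring above (f4156:c8:m0); the group names and comments are taken from that docstring and are illustrative only:

    >>> traj.f_add_parameter_group('MyName', 'this is an informative comment')
    >>> traj.f_add_parameter_group(name='MyName', comment='This is an informative comment')
    >>> traj.f_add_parameter_group('subgroup1.subgroup2.subgroup3')  # intermediate groups are created automatically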
self._nn_interface._add_generic(self,type_name=GROUP,group_type_name=GROUP,args=args,kwargs=kwargs,add_prefix=False,check_naming=False)", "docstring": "Can be called from storage service to create a new group to bypass name checking", "id": "f4156:c7:m10"} {"signature": "def _iter_nodes(self, node, recursive=False, max_depth=float(''),with_links=True, in_search=False, predicate=None):", "body": "def _run_predicate(x, run_name_set):branch = x.v_run_branchreturn branch == '' or branch in run_name_setif max_depth is None:max_depth = float('')if predicate is None:predicate = lambda x: Trueelif isinstance(predicate, (tuple, list)):run_list = predicaterun_name_set = set()for item in run_list:if item == -:run_name_set.add(self._root_instance.f_wildcard('', -))elif isinstance(item, int):run_name_set.add(self._root_instance.f_idx_to_run(item))else:run_name_set.add(item)predicate = lambda x: _run_predicate(x, run_name_set)if recursive:return NaturalNamingInterface._recursive_traversal_bfs(node,self._root_instance._linked_by,max_depth, with_links,in_search, predicate)else:iterator = (x for x in self._make_child_iterator(node, with_links) ifpredicate(x[]))if in_search:return iterator else:return (x[] for x in iterator)", "docstring": "Returns an iterator over nodes hanging below a given start node.\n\n :param node:\n\n Start node\n\n :param recursive:\n\n Whether recursively also iterate over the children of the start node's children\n\n :param max_depth:\n\n Maximum depth to search for\n\n :param in_search:\n\n if it is used during get search and if detailed info should be returned\n\n :param with_links:\n\n If links should be considered\n\n :param predicate:\n\n A predicate to filter nodes\n\n :return: Iterator", "id": "f4156:c6:m27"} {"signature": "def f_debug(self):", "body": "return self._debug()", "docstring": "Creates a dummy object containing the whole tree to make unfolding easier.\n\n This method is only useful for debugging purposes.\n If you use an IDE and want to unfold the trajectory tree, you always need to\n open the private attribute `_children`. 
Use to this function to create a new\n object that contains the tree structure in its attributes.\n\n Manipulating the returned object does not change the original tree!", "id": "f4156:c7:m15"} {"signature": "def f_has_leaves(self):", "body": "return len(self._leaves) != ", "docstring": "Checks if node has leaves or not", "id": "f4156:c7:m27"} {"signature": "def __dir__(self):", "body": "result = super(NNGroupNode, self).__dir__()if not is_debug():result.extend(self.f_dir_data())return result", "docstring": "Adds all children to auto-completion", "id": "f4156:c7:m13"} {"signature": "def f_has_links(self):", "body": "return len(self._links) != ", "docstring": "Checks if node has children or not", "id": "f4156:c7:m23"} {"signature": "def _add_leaf_from_storage(self, args, kwargs):", "body": "return self._nn_interface._add_generic(self,type_name=LEAF,group_type_name=GROUP,args=args, kwargs=kwargs,add_prefix=False,check_naming=False)", "docstring": "Can be called from storage service to create a new leaf to bypass name checking", "id": "f4156:c7:m11"} {"signature": "def f_add_link(self, name_or_item, full_name_or_item=None):", "body": "if isinstance(name_or_item, str):name = name_or_itemif isinstance(full_name_or_item, str):instance = self.v_root.f_get(full_name_or_item)else:instance = full_name_or_itemelse:instance = name_or_itemname = instance.v_namereturn self._nn_interface._add_generic(self, type_name=LINK,group_type_name=GROUP, args=(name, instance),kwargs={},add_prefix=False)", "docstring": "Adds a link to an existing node.\n\n Can be called as ``node.f_add_link(other_node)`` this will add a link the `other_node`\n with the link name as the name of the node.\n\n Or can be called as ``node.f_add_link(name, other_node)`` to add a link to the\n `other_node` and the given `name` of the link.\n\n In contrast to addition of groups and leaves, colon separated names\n are **not** allowed, i.e. ``node.f_add_link('mygroup.mylink', other_node)``\n does not work.", "id": "f4156:c7:m19"} {"signature": "def __getitem__(self, item):", "body": "return self._nn_interface._get(self, item,fast_access=self.v_root.v_fast_access,shortcuts=self.v_root.v_shortcuts,max_depth=self.v_root.v_max_depth,auto_load=self.v_root.v_auto_load,with_links=self.v_root.v_with_links)", "docstring": "Equivalent to calling `__getattr__`.\n\n Per default the item is returned and fast access is applied.", "id": "f4156:c7:m36"} {"signature": "def f_add_derived_parameter_group(self, *args, **kwargs):", "body": "return self._nn_interface._add_generic(self, type_name=DERIVED_PARAMETER_GROUP,group_type_name=DERIVED_PARAMETER_GROUP,args=args, kwargs=kwargs)", "docstring": "Adds an empty derived parameter group under the current node.\n\n Adds the full name of the current node as prefix to the name of the group.\n If current node is a single run (root) adds the prefix `'derived_parameters.runs.run_08%d%'`\n to the full name where `'08%d'` is replaced by the index of the current run.\n\n The `name` can also contain subgroups separated via colons, for example:\n `name=subgroup1.subgroup2.subgroup3`. 
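The `f_add_link` record above (f4156:c7:m19) spells out both call forms. A short sketch assuming `traj` contains a group `group_a` and a node reachable as 'group_b.some_leaf'; both names are hypothetical:

    >>> traj.group_a.f_add_link(traj.f_get('group_b.some_leaf'))            # link named after the linked node
    >>> traj.group_a.f_add_link('mylink', traj.f_get('group_b.some_leaf'))  # explicit link name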
These other parent groups will be automatically\n be created.", "id": "f4156:c10:m0"} {"signature": "def execute_network_pre_run(self, traj, network, network_dict, component_list, analyser_list):", "body": "self._execute_network_run(traj, network, network_dict, component_list, analyser_list,pre_run=True)", "docstring": "Runs a network before the actual experiment.\n\n Called by a :class:`~pypet.brian2.network.NetworkManager`.\n Similar to :func:`~pypet.brian2.network.NetworkRunner.run_network`.\n\n Subruns and their durations are extracted from the trajectory. All\n :class:`~pypet.brian2.parameter.Brian2Parameter` instances found under\n `traj.parameters.simulation.pre_durations` (default, you can change the\n name of the group where to search for durations at runner initialisation).\n The order is determined from\n the `v_annotations.order` attributes. There must be at least one subrun in the trajectory,\n otherwise an AttributeError is thrown. If two subruns equal in their order\n property a RuntimeError is thrown.\n\n :param traj: Trajectory container\n\n :param network: BRIAN2 network\n\n :param network_dict: Dictionary of items shared among all components\n\n :param component_list: List of :class:`~pypet.brian2.network.NetworkComponent` objects\n\n :param analyser_list: List of :class:`~pypet.brian2.network.NetworkAnalyser` objects", "id": "f4158:c2:m1"} {"signature": "def _extract_subruns(self, traj, pre_run=False):", "body": "if pre_run:durations_list = traj.f_get_all(self._pre_durations_group_name)else:durations_list = traj.f_get_all(self._durations_group_name)subruns = {}orders = []for durations in durations_list:for duration_param in durations.f_iter_leaves(with_links=False):if '' in duration_param.v_annotations:order = duration_param.v_annotations.orderelse:raise RuntimeError('''' %duration_param.v_full_name)if order in subruns:raise RuntimeError('''' % order)else:subruns[order] = duration_paramorders.append(order)return [subruns[order] for order in sorted(orders)]", "docstring": "Extracts subruns from the trajectory.\n\n :param traj: Trajectory container\n\n :param pre_run: Boolean whether current run is regular or a pre-run\n\n :raises: RuntimeError if orders are duplicates or even missing", "id": "f4158:c2:m3"} {"signature": "def add_parameters(self, traj):", "body": "pass", "docstring": "Adds parameters to `traj`.\n\n Function called from the :class:`~pypet.brian2.network.NetworkManager` to\n define and add parameters to the trajectory container.", "id": "f4158:c0:m0"} {"signature": "def add_to_network(self, traj, network, current_subrun, subrun_list, network_dict):", "body": "pass", "docstring": "Can add network objects before a specific `subrun`.\n\n Called by a :class:`~pypet.brian2.network.NetworkRunner` before a the\n given `subrun`.\n\n Potentially one wants to add some BRIAN2 objects later to the network than\n at the very beginning of an experimental run. 
For example, a monitor might\n be added at the second subrun after an initial phase that is not supposed\n to be recorded.\n\n :param traj: Trajectoy container\n\n :param network:\n\n BRIAN2 network where elements could be added via `add(...)`.\n\n :param current_subrun:\n\n :class:`~pypet.brian2.parameter.Brian2Parameter` specifying the very next\n subrun to be simulated.\n\n :param subrun_list:\n\n List of :class:`~pypet.brian2.parameter.Brian2Parameter` objects that are to\n be run after the current subrun.\n\n :param network_dict:\n\n Dictionary of items shared by all components.", "id": "f4158:c0:m3"} {"signature": "def pre_build(self, traj):", "body": "self._logger.info('')for component in self.components:component.pre_build(traj, self._brian_list, self._network_dict)if self.analysers:self._logger.info('')for analyser in self.analysers:analyser.pre_build(traj, self._brian_list, self._network_dict)self._logger.info('')self.network_runner.pre_build(traj, self._brian_list, self._network_dict)self._pre_built = True", "docstring": "Pre-builds network components.\n\n Calls :func:`~pypet.brian2.network.NetworkComponent.pre_build` for all components,\n analysers, and the network runner.\n\n `pre_build` is not automatically called but either needs to be executed manually\n by the user, either calling it directly or by using\n :func:`~pypet.brian2.network.NetworkManager.pre_run`.\n\n This function does not create a `BRIAN2 network`, but only it's components.\n\n :param traj: Trajectory container", "id": "f4158:c3:m2"} {"signature": "def add_parameters(self, traj):", "body": "self._logger.info('')for component in self.components:component.add_parameters(traj)if self.analysers:self._logger.info('')for analyser in self.analysers:analyser.add_parameters(traj)self._logger.info('')self.network_runner.add_parameters(traj)", "docstring": "Adds parameters for a network simulation.\n\n Calls :func:`~pypet.brian2.network.NetworkComponent.add_parameters` for all components,\n analyser, and the network runner (in this order).\n\n :param traj: Trajectory container", "id": "f4158:c3:m1"} {"signature": "def f_set_single(self, name, item):", "body": "if type(item) in [SpikeMonitor, StateMonitor, PopulationRateMonitor]:if self.v_stored:self._logger.warning('''''')self._extract_monitor_data(item)else:super(Brian2MonitorResult, self).f_set_single(name, item)", "docstring": "To add a monitor use `f_set_single('monitor', brian_monitor)`.\n\n Otherwise `f_set_single` works similar to :func:`~pypet.parameter.Result.f_set_single`.", "id": "f4159:c2:m4"} {"signature": "def _supports(self, data):", "body": "if isinstance(data, Quantity):return Trueelif super(Brian2Result, self)._supports(data):return Truereturn False", "docstring": "Simply checks if data is supported", "id": "f4159:c1:m2"} {"signature": "@propertydef v_monitor_type(self):", "body": "return self._monitor_type", "docstring": "The type of the stored monitor. 
Each MonitorResult can only manage a single Monitor.", "id": "f4159:c2:m3"} {"signature": "def get_random_port_url():", "body": "url = port_to_tcp()errwrite('' % url)return url", "docstring": "Determines the local server url with a random port", "id": "f4160:m4"} {"signature": "def get_log_path(traj, process_name=None):", "body": "return rename_log_file(generic_log_folder, trajectory=traj, process_name=process_name)", "docstring": "Returns the path to the log files based on trajectory name etc.", "id": "f4160:m3"} {"signature": "def prepare_log_config():", "body": "conf = testParams['']pypet_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ''))init_path = os.path.join(pypet_path, '')if conf == '':conf_file = os.path.join(init_path, '')conf_parser = handle_config_file(conf_file)conf = conf_parserelif conf == '':conf_file = os.path.join(init_path, '')conf_parser = handle_config_file(conf_file)conf = conf_parsertestParams[''] = conf", "docstring": "Prepares the test logging init files and creates parsers.", "id": "f4160:m5"} {"signature": "def remove_data():", "body": "global testParamsif testParams['']:get_root_logger().log(, '')shutil.rmtree(testParams[''], True)", "docstring": "Removes all data from temporary folder", "id": "f4160:m12"} {"signature": "def create_param_dict(param_dict):", "body": "param_dict[''] = {}param_dict[''] = {}param_dict[''] ={}param_dict[''] = {}param_dict[''] = {}param_dict[''] ={}param_dict[''] ={}param_dict['']={}normal_dict = param_dict['']normal_dict[''] = ''normal_dict[''] = normal_dict[''] = normal_dict[''] = normal_dict[''] =Truenormal_dict[''] = numpy_dict=param_dict['']numpy_dict[''] = np.array(['', '', ''])numpy_dict[''] = np.array([,,,])numpy_dict[''] = np.array([,,,])numpy_dict[''] = np.array([True, False, True])param_dict[''][''] = np.matrix([[,],[,]])param_dict[''][''] = np.array([[[,],[,]],[[,-],[,]]])spsparse_csc = spsp.lil_matrix((,))spsparse_csc[,] = spsparse_csc[,] = spsparse_csc = spsparse_csc.tocsc()spsparse_csr = spsp.lil_matrix((,))spsparse_csr[,] = spsparse_csr[,] = spsparse_csr = spsparse_csr.tocsr()spsparse_bsr = spsp.bsr_matrix(np.matrix([[, , , , , ],[, , , , , ],[, , , , , ],[, , , , , ],[, , , , , ],[, , , , , ]]))spsparse_dia = spsp.dia_matrix(np.matrix([[, , , ],[, , , ],[, , , ],[, , , ]]))param_dict[''][''] = spsparse_bsrparam_dict[''][''] = spsparse_cscparam_dict[''][''] = spsparse_csrparam_dict[''][''] = spsparse_diaparam_dict[''][''] = ()param_dict[''][''] = (,,)param_dict[''][''] = (,,)param_dict[''][''] = ('','','')param_dict[''][''] = []param_dict[''][''] = [,,]param_dict[''][''] = [,,]param_dict[''][''] = ['','','']param_dict['']['']= ['','', , (), ]param_dict['']['']= ['','', , (), ]param_dict['']['']= ['',[,], , (),]", "docstring": "Fills a dictionary with some parameters that can be put into a trajectory.", "id": "f4161:m0"} {"signature": "def clear_handlers(self):", "body": "root = logging.getLogger()for logger in list(root.manager.loggerDict.values()) + [root]:if hasattr(logger, ''):handlers = logger.handlersfor handler in handlers:if hasattr(handler, ''):handler.flush()if hasattr(handler, ''):handler.close()logger.handlers = []sys.stdout = sys.__stdout__sys.stderr = sys.__stderr__", "docstring": "Deletes all handlers and closes all log-files", "id": "f4161:c0:m1"} {"signature": "def execute_example(filename):", "body": "new_filename = Nonetry:new_filename = prepend_mpl_import(filename)retcode = subprocess.call(sys.executable + '' + new_filename, shell=True)if retcode != :print('' % 
filename)sys.exit(retcode)else:print('')finally:if new_filename is not None:os.remove(new_filename)", "docstring": "Executes a file as script.\n\n First prepends matplotlib import", "id": "f4163:m2"} {"signature": "def _prm_add_meta_info(self, instance, group, overwrite=False):", "body": "if overwrite:flags = ()else:flags = (HDF5StorageService.ADD_ROW,)definitely_store_comment = Truetry:definitely_store_comment = self._prm_meta_add_summary(instance)try:table_name = instance.v_branch + ''table = getattr(self._overview_group, table_name)if len(table) < pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH:self._all_store_param_or_result_table_entry(instance, table,flags=flags)except pt.NoSuchNodeError:passexcept Exception as exc:self._logger.error('' % repr(exc))if ((not self._purge_duplicate_comments or definitely_store_comment) andinstance.v_comment != ''):setattr(group._v_attrs, HDF5StorageService.COMMENT, instance.v_comment)setattr(group._v_attrs, HDF5StorageService.CLASS_NAME, instance.f_get_class_name())setattr(group._v_attrs, HDF5StorageService.LEAF, True)if instance.v_is_parameter and instance.v_explored:try:tablename = ''table = getattr(self._overview_group, tablename)if len(table) < pypetconstants.HDF5_MAX_OVERVIEW_TABLE_LENGTH:self._all_store_param_or_result_table_entry(instance, table,flags=flags)except pt.NoSuchNodeError:passexcept Exception as exc:self._logger.error('''' % repr(exc))", "docstring": "Adds information to overview tables and meta information to\n the `instance`s hdf5 `group`.\n\n :param instance: Instance to store meta info about\n :param group: HDF5 group of instance\n :param overwrite: If data should be explicitly overwritten", "id": "f4214:c5:m74"} {"signature": "def _prm_write_into_other_array(self, key, data, group, fullname,flag, **kwargs):", "body": "try:if flag == HDF5StorageService.CARRAY:factory = self._hdf5file.create_carrayelif flag == HDF5StorageService.EARRAY:factory = self._hdf5file.create_earrayelif flag == HDF5StorageService.VLARRAY:factory = self._hdf5file.create_vlarrayelse:raise RuntimeError('')if key in group:raise ValueError('')if '' in kwargs:filters = kwargs.pop('')else:filters = self._all_get_filters(kwargs)try:other_array = factory(where=group, name=key, obj=data,filters=filters, **kwargs)except (ValueError, TypeError) as exc:try:conv_data = data[:]conv_data = np.core.defchararray.encode(conv_data, self.encoding)other_array = factory(where=group, name=key,obj=conv_data,filters=filters, **kwargs)except Exception:raise excif data is not None:self._all_set_attributes_to_recall_natives(data, other_array,HDF5StorageService.DATA_PREFIX)setattr(other_array._v_attrs, HDF5StorageService.STORAGE_TYPE, flag)self._hdf5file.flush()except:self._logger.error('' % (flag, key, fullname))raise", "docstring": "Stores data as carray, earray or vlarray depending on `flag`.\n\n :param key:\n\n Name of data item to store\n\n :param data:\n\n Data to store\n\n :param group:\n\n Group node where to store data in hdf5 file\n\n :param fullname:\n\n Full name of the `data_to_store`s original container, only needed for throwing errors.\n\n :param recall:\n\n If container type and data type for perfect recall should be stored\n\n :param flag:\n\n How to store:\n CARRAY, EARRAY, VLARRAY", "id": "f4214:c5:m83"} {"signature": "def _lnk_delete_link(self, link_name):", "body": "translated_name = '' + self._trajectory_name + '' + link_name.replace('','')link = self._hdf5file.get_node(where=translated_name)link._f_remove()", "docstring": "Removes a link from disk", "id": 
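The storage routine in record f4214:c5:m83 above dispatches to PyTables' create_carray / create_earray / create_vlarray factories. A self-contained sketch of the carray path using plain PyTables calls; file name, node name, and data are illustrative:

    import numpy as np
    import tables as pt

    with pt.open_file('example.h5', mode='w') as h5file:
        filters = pt.Filters(complib='zlib', complevel=5)
        # Compressed, chunked array written in one go from an in-memory numpy array
        h5file.create_carray(where='/', name='my_carray', obj=np.arange(100), filters=filters)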
"f4214:c5:m85"} {"signature": "def _ann_store_annotations(self, item_with_annotations, node, overwrite=False):", "body": "if overwrite is True or overwrite == '':annotated = self._all_get_from_attrs(node, HDF5StorageService.ANNOTATED)if annotated:current_attrs = node._v_attrsfor attr_name in current_attrs._v_attrnames:if attr_name.startswith(HDF5StorageService.ANNOTATION_PREFIX):delattr(current_attrs, attr_name)delattr(current_attrs, HDF5StorageService.ANNOTATED)self._hdf5file.flush()if not item_with_annotations.v_annotations.f_is_empty():anno_dict = item_with_annotations.v_annotations._dictcurrent_attrs = node._v_attrschanged = Falsefor field_name in anno_dict:val = anno_dict[field_name]field_name_with_prefix = HDF5StorageService.ANNOTATION_PREFIX + field_nameif field_name_with_prefix not in current_attrs:setattr(current_attrs, field_name_with_prefix, val)changed = Trueif changed:setattr(current_attrs, HDF5StorageService.ANNOTATED, True)self._hdf5file.flush()", "docstring": "Stores annotations into an hdf5 file.", "id": "f4214:c5:m67"} {"signature": "@staticmethoddef _prm_get_longest_stringsize(string_list):", "body": "maxlength = for stringar in string_list:if isinstance(stringar, np.ndarray):if stringar.ndim > :for string in stringar.ravel():maxlength = max(len(string), maxlength)else:maxlength = max(len(stringar.tolist()), maxlength)else:maxlength = max(len(stringar), maxlength)return int(maxlength * )", "docstring": "Returns the longest string size for a string entry across data.", "id": "f4214:c5:m90"} {"signature": "def _prm_write_pandas_data(self, key, data, group, fullname, flag, **kwargs):", "body": "try:if '' not in kwargs:filters = self._all_get_filters(kwargs)kwargs[''] = filtersif '' not in kwargs:kwargs[''] = self.pandas_formatif '' not in kwargs:kwargs[''] = self.encodingoverwrite = kwargs.pop('', False)if key in group and not (overwrite or kwargs.get('', False)):raise ValueError('''' % (key, fullname))else:self._logger.debug('' % (key, fullname))if data is not None and (kwargs[''] == '' or kwargs[''] == ''):kwargs[''] = data.shape[]name = group._v_pathname + '' + keyself._hdf5store.put(name, data, **kwargs)self._hdf5store.flush()self._hdf5file.flush()frame_group = group._f_get_child(key)setattr(frame_group._v_attrs, HDF5StorageService.STORAGE_TYPE, flag)self._hdf5file.flush()except:self._logger.error('' % (key, fullname))raise", "docstring": "Stores a pandas DataFrame into hdf5.\n\n :param key:\n\n Name of data item to store\n\n :param data:\n\n Pandas Data to Store\n\n :param group:\n\n Group node where to store data in hdf5 file\n\n :param fullname:\n\n Full name of the `data_to_store`s original container, only needed for throwing errors.\n\n :param flag:\n\n If it is a series, frame or panel", "id": "f4214:c5:m82"} {"signature": "def _trj_merge_trajectories(self, other_trajectory_name, rename_dict, move_nodes=False,delete_trajectory=False, other_filename=None):", "body": "if other_filename is None or other_filename == self.filename:other_filename = self.filenameother_file = self._hdf5fileother_is_different = Falseelse:other_file = pt.open_file(filename=other_filename, mode='')other_is_different = Truetry:if not '' + other_trajectory_name in other_file:raise ValueError('''' % (self._trajectory_name,other_trajectory_name,other_filename))for old_name in rename_dict:new_name = rename_dict[old_name]split_name = old_name.split('')old_location = '' + other_trajectory_name + '' + ''.join(split_name)split_name = new_name.split('')new_parent_location = '' + self._trajectory_name 
+ '' + ''.join(split_name[:-])new_short_name = split_name[-]old_node = other_file.get_node(old_location)if move_nodes:self._hdf5file.move_node( where=old_node, newparent=new_parent_location,newname=new_short_name, createparents=True)else:if other_is_different:new_parent_dot_location = ''.join(split_name[:-])new_parent_or_loc, _ = self._all_create_or_get_groups(new_parent_dot_location)create_parents = Falseelse:new_parent_or_loc = new_parent_locationcreate_parents = Trueself._hdf5file.copy_node(where=old_node, newparent=new_parent_or_loc,newname=new_short_name, createparents=create_parents,recursive=True)if delete_trajectory:other_file.remove_node(where='', name=other_trajectory_name, recursive=True)finally:if other_is_different:other_file.flush()other_file.close()", "docstring": "Merges another trajectory into the current trajectory (as in self._trajectory_name).\n\n :param other_trajectory_name: Name of other trajectory\n :param rename_dict: Dictionary with old names (keys) and new names (values).\n :param move_nodes: Whether to move hdf5 nodes or copy them\n :param delete_trajectory: Whether to delete the other trajectory", "id": "f4214:c5:m32"} {"signature": "def _all_add_or_modify_row(self, item_name, insert_dict, table, index=None, condition=None,condvars=None,flags=(ADD_ROW, MODIFY_ROW,)):", "body": "if len(flags) == :returnif index is not None and condition is not None:raise ValueError('')elif condition is not None:row_iterator = table.where(condition, condvars=condvars)elif index is not None:row_iterator = table.iterrows(index, index + )else:row_iterator = Nonetry:row = next(row_iterator)except TypeError:row = Noneexcept StopIteration:row = Noneif ((HDF5StorageService.MODIFY_ROW in flags or HDF5StorageService.ADD_ROW in flags) andHDF5StorageService.REMOVE_ROW in flags):raise ValueError('')if row is None and HDF5StorageService.ADD_ROW in flags:row = table.rowself._all_insert_into_row(row, insert_dict)row.append()elif row is not None and HDF5StorageService.MODIFY_ROW in flags:self._all_insert_into_row(row, insert_dict)row.update()elif HDF5StorageService.REMOVE_ROW in flags:if row is not None:row_number = row.nrowtry:table.remove_rows(start=row_number, stop=row_number+)except NotImplementedError:passelse:raise ValueError('''')self._all_kill_iterator(row_iterator)table.flush()if HDF5StorageService.REMOVE_ROW not in flags and row is None:raise RuntimeError('''' % (item_name, table._v_name))", "docstring": "Adds or changes a row in a pytable.\n\n :param item_name: Name of item, the row is about, only important for throwing errors.\n\n :param insert_dict:\n\n Dictionary of data that is about to be inserted into the pytables row.\n\n :param table:\n\n The table to insert or modify a row in\n\n :param index:\n\n Index of row to be modified. 
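The pandas writer in record f4214:c5:m82 above ultimately calls `HDFStore.put`. A minimal standalone sketch of that call with plain pandas; the store path and key are illustrative:

    import pandas as pd

    df = pd.DataFrame({'x': [1, 2, 3], 'y': [0.1, 0.2, 0.3]})
    with pd.HDFStore('example.h5', mode='w', complib='blosc', complevel=9) as store:
        store.put('/results/frame', df, format='table')  # 'table' format allows querying and appending
        store.flush()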
Instead of an index a search condition can be\n used as well, see below.\n\n :param condition:\n\n Condition to search for in the table\n\n :param condvars:\n\n Variables for the search condition\n\n :param flags:\n\n Flags whether to add, modify, or remove a row in the table", "id": "f4214:c5:m61"} {"signature": "def _prm_load_parameter_or_result(self, instance,load_data=pypetconstants.LOAD_DATA,load_only=None,load_except=None,load_flags=None,with_links=False,recursive=False,max_depth=None,_hdf5_group=None,):", "body": "if load_data == pypetconstants.LOAD_NOTHING:returnif _hdf5_group is None:_hdf5_group = self._all_get_node_by_name(instance.v_full_name)if load_data == pypetconstants.OVERWRITE_DATA:if instance.v_is_parameter and instance.v_locked:self._logger.debug('' %instance.v_full_name)returninstance.f_empty()instance.v_annotations.f_empty()instance.v_comment = ''self._all_load_skeleton(instance, _hdf5_group)instance._stored = Trueif isinstance(load_only, str):load_only = [load_only]if isinstance(load_except, str):load_except = [load_except]if load_data == pypetconstants.LOAD_SKELETON:self._node_processing_timer.signal_update()returnelif load_only is not None:if load_except is not None:raise ValueError('''')elif instance.v_is_parameter and instance.v_locked:raise pex.ParameterLockedException('''' %instance.v_full_name)self._logger.debug('' %str(load_only))load_only = set(load_only)elif load_except is not None:if instance.v_is_parameter and instance.v_locked:raise pex.ParameterLockedException('''' %instance.v_full_name)self._logger.debug('' %str(load_except))load_except = set(load_except)elif not instance.f_is_empty():self._node_processing_timer.signal_update()returnfull_name = instance.v_full_nameself._logger.debug('' % full_name)load_dict = {} if load_flags is None:load_flags = {}try:instance_flags = instance._load_flags().copy() except AttributeError:instance_flags = {}instance_flags.update(load_flags)load_flags = instance_flagsself._prm_load_into_dict(full_name=full_name,load_dict=load_dict,hdf5_group=_hdf5_group,instance=instance,load_only=load_only,load_except=load_except,load_flags=load_flags)if load_only is not None:if len(load_only) > :self._logger.warning('''' %(str(load_only), full_name))elif load_except is not None:if len(load_except) > :self._logger.warning(('''' % (str(load_except), full_name)))if load_dict:try:instance._load(load_dict)if instance.v_is_parameter:instance.f_lock()except:self._logger.error('' % full_name)raiseself._node_processing_timer.signal_update()", "docstring": "Loads a parameter or result from disk.\n\n :param instance:\n\n Empty parameter or result instance\n\n :param load_data:\n\n How to load stuff\n\n :param load_only:\n\n List of data keys if only parts of a result should be loaded\n\n :param load_except:\n\n List of data key that should NOT be loaded.\n\n :param load_flags:\n\n Dictionary to determine how something is loaded\n\n :param with_links:\n\n Placeholder, because leaves have no links\n\n :param recursive:\n\n Dummy variable, no-op because leaves have no children\n\n :param max_depth:\n\n Dummy variable, no-op because leaves have no children\n\n :param _hdf5_group:\n\n The corresponding hdf5 group of the instance", "id": "f4214:c5:m92"} {"signature": "def _all_get_or_create_table(self, where, tablename, description, expectedrows=None):", "body": "where_node = self._hdf5file.get_node(where)if not tablename in where_node:if not expectedrows is None:table = self._hdf5file.create_table(where=where_node, 
name=tablename,description=description, title=tablename,expectedrows=expectedrows,filters=self._all_get_filters())else:table = self._hdf5file.create_table(where=where_node, name=tablename,description=description, title=tablename,filters=self._all_get_filters())else:table = where_node._f_get_child(tablename)return table", "docstring": "Creates a new table, or if the table already exists, returns it.", "id": "f4214:c5:m54"} {"signature": "@propertydef fletcher32(self):", "body": "return self._fletcher32", "docstring": "Whether fletcher 32 should be used", "id": "f4214:c5:m11"} {"signature": "def _srvc_load_several_items(self, iterable, *args, **kwargs):", "body": "for input_tuple in iterable:msg = input_tuple[]item = input_tuple[]if len(input_tuple) > :args = input_tuple[]if len(input_tuple) > :kwargs = input_tuple[]if len(input_tuple) > :raise RuntimeError('')self.load(msg, item, *args, **kwargs)", "docstring": "Loads several items from an iterable\n\n Iterables are supposed to be of a format like `[(msg, item, args, kwarg),...]`\n If `args` and `kwargs` are not part of a tuple, they are taken from the\n current `args` and `kwargs` provided to this function.", "id": "f4214:c5:m24"} {"signature": "def _all_load_skeleton(self, traj_node, hdf5_group):", "body": "if traj_node.v_annotations.f_is_empty():self._ann_load_annotations(traj_node, hdf5_group)if traj_node.v_comment == '':comment = self._all_get_from_attrs(hdf5_group, HDF5StorageService.COMMENT)if comment is None:comment = ''traj_node.v_comment = comment", "docstring": "Reloads skeleton data of a tree node", "id": "f4214:c5:m71"} {"signature": "def _tree_create_leaf(self, name, trajectory, hdf5_group):", "body": "class_name = self._all_get_from_attrs(hdf5_group, HDF5StorageService.CLASS_NAME)class_constructor = trajectory._create_class(class_name)instance = trajectory._construct_instance(class_constructor, name)return instance", "docstring": "Creates a new pypet leaf instance.\n\n Returns the leaf and if it is an explored parameter the length of the range.", "id": "f4214:c5:m46"} {"signature": "@propertydef complib(self):", "body": "return self._complib", "docstring": "Compression library used", "id": "f4214:c5:m7"} {"signature": "def _all_get_node_by_name(self, name):", "body": "path_name = name.replace('', '')where = '' % (self._trajectory_name, path_name)return self._hdf5file.get_node(where=where)", "docstring": "Returns an HDF5 node by the path specified in `name`", "id": "f4214:c5:m55"} {"signature": "def _all_get_filters(self, kwargs=None):", "body": "if kwargs is None:kwargs = {}complib = kwargs.pop('', None)complevel = kwargs.pop('', None)shuffle = kwargs.pop('', None)fletcher32 = kwargs.pop('', None)if complib is not None:self._filters = Noneelse:complib = self._complibif complevel is not None:self._filters = Noneelse:complevel = self._complevelif shuffle is not None:self._filters = Noneelse:shuffle = self._shuffleif fletcher32 is not None:self._filters = Noneelse:fletcher32 = self._fletcher32if self._filters is None:self._filters = pt.Filters(complib=complib, complevel=complevel,shuffle=shuffle, fletcher32=fletcher32)self._hdf5file.filters = self._filtersself._hdf5store._filters = self._filtersself._hdf5store._complevel = complevelself._hdf5store._complib = complibself._hdf5store._fletcher32 = fletcher32return self._filters", "docstring": "Makes filters\n\n Pops filter arguments from `kwargs` such that they are not passed\n on to other functions also using kwargs.", "id": "f4214:c5:m20"} {"signature": "def _tree_load_link(self, 
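A self-contained sketch combining the two PyTables patterns described in the records above (f4214:c5:m61 and f4214:c5:m54): create a table if it does not yet exist, otherwise reuse it, then append and modify rows. Column layout, file name, and values are illustrative:

    import tables as pt

    description = {'location': pt.StringCol(128, pos=0), 'value': pt.FloatCol(pos=1)}

    with pt.open_file('example.h5', mode='a') as h5file:
        root = h5file.get_node('/')
        if 'overview' not in root:
            table = h5file.create_table(where=root, name='overview',
                                        description=description, title='overview',
                                        filters=pt.Filters(complib='zlib', complevel=5))
        else:
            table = root._f_get_child('overview')

        # ADD_ROW: append a new row
        row = table.row
        row['location'] = 'results.run_00000000'
        row['value'] = 42.0
        row.append()
        table.flush()

        # MODIFY_ROW: update the first row in place
        for row in table.iterrows(0, 1):
            row['value'] = 43.0
            row.update()
        table.flush()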
new_traj_node, load_data, traj, as_new, hdf5_soft_link):", "body": "try:linked_group = hdf5_soft_link()link_name = hdf5_soft_link._v_nameif (not link_name in new_traj_node._links orload_data==pypetconstants.OVERWRITE_DATA):link_location = linked_group._v_pathnamefull_name = ''.join(link_location.split('')[:])if not full_name in traj:self._tree_load_sub_branch(traj, full_name,load_data=pypetconstants.LOAD_SKELETON,with_links=False, recursive=False, _trajectory=traj,_as_new=as_new, _hdf5_group=self._trajectory_group)if (load_data == pypetconstants.OVERWRITE_DATA andlink_name in new_traj_node._links):new_traj_node.f_remove_link(link_name)if not link_name in new_traj_node._links:new_traj_node._nn_interface._add_generic(new_traj_node,type_name=nn.LINK,group_type_name=nn.GROUP,args=(link_name,traj.f_get(full_name)),kwargs={},add_prefix=False,check_naming=False)else:raise RuntimeError('')except pt.NoSuchNodeError:self._logger.error('''''' %(hdf5_soft_link._v_name, new_traj_node.v_full_name))", "docstring": "Loads a link\n\n :param new_traj_node: Node in traj containing link \n :param load_data: How to load data in the linked node\n :param traj: The trajectory\n :param as_new: If data in linked node should be loaded as new\n :param hdf5_soft_link: The hdf5 soft link", "id": "f4214:c5:m48"} {"signature": "def _trj_load_trajectory(self, traj, as_new, load_parameters, load_derived_parameters,load_results, load_other_data, recursive, max_depth,with_run_information, with_meta_data, force):", "body": "if (as_new and (load_derived_parameters != pypetconstants.LOAD_NOTHING orload_results != pypetconstants.LOAD_NOTHING orload_other_data != pypetconstants.LOAD_NOTHING)):raise ValueError('''')if as_new and load_parameters != pypetconstants.LOAD_DATA:raise ValueError('''')loadconstants = (pypetconstants.LOAD_NOTHING, pypetconstants.LOAD_SKELETON,pypetconstants.LOAD_DATA, pypetconstants.OVERWRITE_DATA)if not (load_parameters in loadconstants and load_derived_parameters in loadconstants andload_results in loadconstants and load_other_data in loadconstants):raise ValueError('''''''' % str(loadconstants))traj._stored = not as_newload_data = max(load_parameters, load_derived_parameters, load_results, load_other_data)if with_meta_data:self._trj_load_meta_data(traj, load_data, as_new, with_run_information, force)if (load_parameters != pypetconstants.LOAD_NOTHING orload_derived_parameters != pypetconstants.LOAD_NOTHING orload_results != pypetconstants.LOAD_NOTHING orload_other_data != pypetconstants.LOAD_NOTHING):self._logger.info('' % traj.v_name)else:self._logger.info('' % traj.v_name)returnmaximum_display_other = counter = for children in [self._trajectory_group._v_groups, self._trajectory_group._v_links]:for hdf5_group_name in children:hdf5_group = children[hdf5_group_name]child_name = hdf5_group._v_nameload_subbranch = Trueif child_name == '':if as_new:loading = pypetconstants.LOAD_NOTHINGelse:loading = load_parameterselif child_name == '':loading = load_parameterselif child_name == '':loading = load_resultselif child_name == '':loading = load_derived_parameterselif child_name == '':continueelse:loading = load_other_dataload_subbranch = Falseif loading == pypetconstants.LOAD_NOTHING:continueif load_subbranch:self._logger.info('' %(child_name, str(loading)))else:if counter < maximum_display_other:self._logger.info('' % (child_name, str(loading)))elif counter == maximum_display_other:self._logger.info('''''''''')counter += self._tree_load_sub_branch(traj, child_name, load_data=loading, 
with_links=True,recursive=recursive,max_depth=max_depth,_trajectory=traj, _as_new=as_new,_hdf5_group=self._trajectory_group)", "docstring": "Loads a single trajectory from a given file.\n\n\n :param traj: The trajectory\n\n :param as_new: Whether to load trajectory as new\n\n :param load_parameters: How to load parameters and config\n\n :param load_derived_parameters: How to load derived parameters\n\n :param load_results: How to load results\n\n :param load_other_data: How to load anything not within the four subbranches\n\n :param recursive: If data should be loaded recursively\n\n :param max_depth: Maximum depth of loading\n\n :param with_run_information:\n\n If run information should be loaded\n\n :param with_meta_data:\n\n If meta data infor should be loaded\n\n :param force: Force load in case there is a pypet version mismatch\n\n You can specify how to load the parameters, derived parameters and results\n as follows:\n\n :const:`pypet.pypetconstants.LOAD_NOTHING`: (0)\n\n Nothing is loaded\n\n :const:`pypet.pypetconstants.LOAD_SKELETON`: (1)\n\n The skeleton including annotations are loaded, i.e. the items are empty.\n Non-empty items in RAM are left untouched.\n\n :const:`pypet.pypetconstants.LOAD_DATA`: (2)\n\n The whole data is loaded.\n Only empty or in RAM non-existing instance are filled with the\n data found on disk.\n\n :const:`pypet.pypetconstants.OVERWRITE_DATA`: (3)\n\n The whole data is loaded.\n If items that are to be loaded are already in RAM and not empty,\n they are emptied and new data is loaded from disk.\n\n\n If `as_new=True` the old trajectory is loaded into the new one, only parameters can be\n loaded. If `as_new=False` the current trajectory is completely replaced by the one\n on disk, i.e. the name from disk, the timestamp, etc. 
are assigned to `traj`.", "id": "f4214:c5:m34"} {"signature": "def _srvc_make_overview_tables(self, tables_to_make, traj=None):", "body": "for table_name in tables_to_make:paramdescriptiondict = {}expectedrows = paramdescriptiondict[''] = pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_LOCATION_LENGTH,pos=)paramdescriptiondict[''] = pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_NAME_LENGTH,pos=)paramdescriptiondict[''] = pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_COMMENT_LENGTH)paramdescriptiondict[''] = pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_VALUE_LENGTH, pos=)if table_name == '':if traj is not None:expectedrows = len(traj._config)if table_name == '':if traj is not None:expectedrows = len(traj._parameters)if table_name == '':paramdescriptiondict[''] = pt.StringCol(pypetconstants.HDF5_STRCOL_MAX_RANGE_LENGTH)paramdescriptiondict[''] = pt.IntCol()if traj is not None:expectedrows = len(traj._explored_parameters)if table_name.endswith(''):paramdescriptiondict[''] = pt.StringCol(, pos=)if table_name == '':expectedrows = self._derived_parameters_per_runif traj is not None:expectedrows *= len(traj)expectedrows += len(traj._derived_parameters)if table_name == '':expectedrows = self._results_per_runif traj is not None:expectedrows *= len(traj)expectedrows += len(traj._results)if expectedrows > :paramtable = self._all_get_or_create_table(where=self._overview_group,tablename=table_name,description=paramdescriptiondict,expectedrows=expectedrows)else:paramtable = self._all_get_or_create_table(where=self._overview_group,tablename=table_name,description=paramdescriptiondict)paramtable.flush()", "docstring": "Creates the overview tables in overview group", "id": "f4214:c5:m43"} {"signature": "def _grp_store_group(self, traj_group, store_data=pypetconstants.STORE_DATA,with_links=True, recursive=False, max_depth=None,_hdf5_group=None, _newly_created=False):", "body": "if store_data == pypetconstants.STORE_NOTHING:returnelif store_data == pypetconstants.STORE_DATA_SKIPPING and traj_group._stored:self._logger.debug('' %traj_group.v_full_name)elif not recursive:if _hdf5_group is None:_hdf5_group, _newly_created = self._all_create_or_get_groups(traj_group.v_full_name)overwrite = store_data == pypetconstants.OVERWRITE_DATAif (traj_group.v_comment != '' and(HDF5StorageService.COMMENT not in _hdf5_group._v_attrs or overwrite)):setattr(_hdf5_group._v_attrs, HDF5StorageService.COMMENT, traj_group.v_comment)if ((_newly_created or overwrite) andtype(traj_group) not in (nn.NNGroupNode, nn.ConfigGroup, nn.ParameterGroup,nn.DerivedParameterGroup, nn.ResultGroup)):setattr(_hdf5_group._v_attrs, HDF5StorageService.CLASS_NAME,traj_group.f_get_class_name())self._ann_store_annotations(traj_group, _hdf5_group, overwrite=overwrite)self._hdf5file.flush()traj_group._stored = Trueself._node_processing_timer.signal_update()if recursive:parent_traj_group = traj_group.f_get_parent()parent_hdf5_group = self._all_create_or_get_groups(parent_traj_group.v_full_name)[]self._tree_store_nodes_dfs(parent_traj_group, traj_group.v_name, store_data=store_data,with_links=with_links, recursive=recursive,max_depth=max_depth, current_depth=,parent_hdf5_group=parent_hdf5_group)", "docstring": "Stores a group node.\n\n For group nodes only annotations and comments need to be stored.", "id": "f4214:c5:m69"} {"signature": "def _trj_check_version(self, version, python, force):", "body": "curr_python = pypetconstants.python_version_stringif (version != VERSION or curr_python != python) and not force:raise pex.VersionMismatchError('''''''''' %(VERSION, 
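A short sketch of loading with the constants listed in the `_trj_load_trajectory` docstring above, assuming pypet's `load_trajectory` helper and the `pypetconstants` module that other records in this corpus also use; the file name and import path are assumptions:

    from pypet import load_trajectory, pypetconstants

    # 0 = LOAD_NOTHING, 1 = LOAD_SKELETON, 2 = LOAD_DATA, 3 = OVERWRITE_DATA (per the docstring above)
    traj = load_trajectory(filename='example.hdf5', index=-1,
                           load_data=pypetconstants.LOAD_SKELETON)  # structure and annotations only, no data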
curr_python, version, python))elif version != VERSION or curr_python != python:self._logger.warning('''''''''' %(VERSION, curr_python, version, python))", "docstring": "Checks for version mismatch\n\n Raises a VersionMismatchError if version of loaded trajectory and current pypet version\n do not match. In case of `force=True` error is not raised only a warning is emitted.", "id": "f4214:c5:m38"} {"signature": "def _srvc_check_hdf_properties(self, traj):", "body": "for attr_name in HDF5StorageService.ATTR_LIST:try:config = traj.f_get('' + attr_name).f_get()setattr(self, attr_name, config)except AttributeError:self._logger.debug('''' %(attr_name, str(getattr(self, attr_name))))for attr_name, table_name in HDF5StorageService.NAME_TABLE_MAPPING.items():try:if table_name in ('', ''):table_name += ''config = traj.f_get('' + table_name).f_get()setattr(self, attr_name, config)except AttributeError:self._logger.debug('''' %(table_name, str(getattr(self, attr_name))))for attr_name, name in HDF5StorageService.PR_ATTR_NAME_MAPPING.items():try:config = traj.f_get('' + name).f_get()setattr(self, attr_name, config)except AttributeError:self._logger.debug('''' %(name, str(getattr(self, attr_name))))if ((not self._overview_results_summary ornot self._overview_derived_parameters_summary) andself._purge_duplicate_comments):raise RuntimeError('''''')self._filters = None", "docstring": "Reads out the properties for storing new data into the hdf5file\n\n :param traj:\n\n The trajectory", "id": "f4214:c5:m25"} {"signature": "def _prm_read_array(self, array, full_name):", "body": "try:result = self._svrc_read_array(array)result, dummy = self._all_recall_native_type(result, array,HDF5StorageService.DATA_PREFIX)return resultexcept:self._logger.error('' % (array._v_name, full_name))raise", "docstring": "Reads data from an array or carray\n\n :param array:\n\n PyTables array or carray to read from\n\n :param full_name:\n\n Full name of the parameter or result whose data is to be loaded\n\n :return:\n\n Data to load", "id": "f4214:c5:m98"} {"signature": "def get_data_node(self):", "body": "if not self._storage_service.is_open:warnings.warn('''',category=RuntimeWarning)return self._request_data('')", "docstring": "Returns the actula node of the underlying data.\n\n In case one uses HDF5 this will be the HDF5 leaf node.", "id": "f4215:c1:m5"} {"signature": "def open_store(self):", "body": "service = self._traj.v_storage_serviceif service.is_open:raise RuntimeError('')service.store(pypetconstants.OPEN_FILE, None,trajectory_name=self._traj.v_name)", "docstring": "Opens store manually not needed if used with `with`", "id": "f4215:c0:m4"} {"signature": "def create_shared_data(self, **kwargs):", "body": "if '' not in kwargs:kwargs[''] = self.FLAGif '' in kwargs:kwargs[''] = kwargs.pop('')if '' in kwargs:self.traj = kwargs.pop('')if '' in kwargs:self.traj = kwargs.pop('')if '' in kwargs:self.name = kwargs.pop['']if '' in kwargs:self.parent = kwargs.pop('')if self.name is not None:self.parent[self.name] = selfreturn self._request_data('', kwargs=kwargs)", "docstring": "Creates shared data on disk with a StorageService on disk.\n\n Needs to be called before shared data can be used later on.\n\n Actual arguments of ``kwargs`` depend on the type of data to be\n created. 
For instance, creating an array one can use the keyword\n ``obj`` to pass a numpy array (``obj=np.zeros((10,20,30))``).\n Whereas for a PyTables table may need a description dictionary\n (``description={'column_1': pt.StringCol(2, pos=0),'column_2': pt.FloatCol( pos=1)}``)\n Refer to the PyTables documentation on how to create tables.", "id": "f4215:c1:m3"} {"signature": "def close_store(self):", "body": "service = self._traj.v_storage_serviceif not service.is_open:raise RuntimeError('''')service.store(pypetconstants.CLOSE_FILE, None)", "docstring": "Closes store manually not needed if used with `with`", "id": "f4215:c0:m3"} {"signature": "def create_shared_data(self, name=None, **kwargs):", "body": "if name is None:item = self.f_get()else:item = self.f_get(name)return item.create_shared_data(**kwargs)", "docstring": "Calls the corresponding function of the shared data item", "id": "f4215:c8:m6"} {"signature": "@propertydef is_open(self):", "body": "return self._storage_service.is_open", "docstring": "Normally the file is opened and closed after each insertion.\n\n However, the storage service may provide the option to keep the store open and signals\n this via this property.", "id": "f4217:c18:m3"} {"signature": "def load(self, *args, **kwargs):", "body": "raise NotImplementedError('''''')", "docstring": "Not implemented", "id": "f4217:c19:m2"} {"signature": "def finalize(self):", "body": "if self._context is not None:if self._socket is not None:self._close_socket(confused=False)self._context.term()self._context = Noneself._poll = None", "docstring": "Closes socket and terminates context\n\n NO-OP if already closed.", "id": "f4217:c4:m4"} {"signature": "def store(self, msg, stuff_to_store, *args, **kwargs):", "body": "trajectory_name = kwargs['']if trajectory_name not in self.references:self.references[trajectory_name] = []self.references[trajectory_name].append((msg, cp.copy(stuff_to_store), args, kwargs))", "docstring": "Simply keeps a reference to the stored data", "id": "f4217:c19:m1"} {"signature": "def load(self, *args, **kwargs):", "body": "try:self.acquire_lock()return self._storage_service.load(*args, **kwargs)finally:if self.lock is not None:try:self.release_lock()except RuntimeError:self._logger.error('' % str(self.lock))", "docstring": "Acquires a lock before loading and releases it afterwards.", "id": "f4217:c18:m7"} {"signature": "@retry(, Exception, , '')def _put_on_pipe(self, to_put):", "body": "self.acquire_lock()self._send_chunks(to_put)self.release_lock()", "docstring": "Puts data on queue", "id": "f4217:c14:m3"} {"signature": "@propertydef multiproc_safe(self):", "body": "return True", "docstring": "This wrapper guarantees multiprocessing safety", "id": "f4217:c0:m1"} {"signature": "@propertydef multiproc_safe(self):", "body": "return True", "docstring": "Usually storage services are not supposed to be multiprocessing safe", "id": "f4217:c18:m4"} {"signature": "def _unlock(self, name, client_id, request_id):", "body": "if name in self._locks:other_client_id, other_request_id, lock_time = self._locks[name]if other_client_id != client_id:response = (self.RELEASE_ERROR + self.DELIMITER +'''' % (name,other_client_id,other_request_id,client_id,request_id))self._logger.error(response)return responseelse:del self._locks[name]return self.RELEASEDelif (name, client_id) in self._timeout_locks:other_request_id, lock_time = self._timeout_locks[(name, client_id)]timeout = time.time() - lock_time - self._timeoutresponse = (self.RELEASE_ERROR + self.DELIMITER +'''' % (name, timeout, 
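The lock-wrapping `load` record above (f4217:c18:m7) acquires a lock before delegating to the wrapped storage service and releases it afterwards. The same pattern with a plain multiprocessing lock; `load_fn` is a stand-in for the wrapped service's load call:

    from multiprocessing import Lock

    lock = Lock()

    def locked_load(load_fn, *args, **kwargs):
        # Serialize access so only one process touches the store at a time
        lock.acquire()
        try:
            return load_fn(*args, **kwargs)
        finally:
            lock.release()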
client_id, other_request_id))return responseelse:response = (self.RELEASE_ERROR + self.DELIMITER +'''' % (name, client_id, request_id))self._logger.warning(response)return response", "docstring": "Handles unlocking", "id": "f4217:c3:m2"} {"signature": "def __call__(self, index, total, percentage_step=, logger='', log_level=logging.INFO,reprint=False, time=True, length=, fmt_string=None, reset=False):", "body": "reset = (reset orindex <= self._current_index ortotal != self._total)if reset:self._reset(index, total, percentage_step, length)statement = Noneindexp1 = index + next_interval = int(indexp1 / self._norm_factor)ending = index >= self._total_minus_oneif next_interval > self._current_interval or ending or reset:if time:remaining_str = self._get_remaining(index)else:remaining_str = ''if ending:statement = '' + '' * self._length +''else:bars = int((indexp1 / self._total) * self._length)spaces = self._length - barspercentage = indexp1 / self._total * if reset:statement = ('' + '' * bars +'' * spaces + '' + '' % percentage + '')else:statement = ('' + '' * bars +'' * spaces + '' + '' % percentage + '' +remaining_str)if fmt_string:statement = fmt_string % statementif logger == '':if reprint:print('' + statement, end='', flush=True)else:print(statement)elif logger is not None:if isinstance(logger, str):logger = logging.getLogger(logger)logger.log(msg=statement, level=log_level)self._current_interval = next_intervalself._current_index = indexreturn statement", "docstring": "Plots a progress bar to the given `logger` for large for loops.\n\n To be used inside a for-loop at the end of the loop.\n\n :param index: Current index of for-loop\n :param total: Total size of for-loop\n :param percentage_step: Percentage step with which the bar should be updated\n :param logger:\n\n Logger to write to, if string 'print' is given, the print statement is\n used. Use None if you don't want to print or log the progressbar statement.\n\n :param log_level: Log level with which to log.\n :param reprint:\n\n If no new line should be plotted but carriage return (works only for printing)\n\n :param time: If the remaining time should be calculated and displayed\n :param length: Length of the bar in `=` signs.\n :param fmt_string:\n\n A string which contains exactly one `%s` in order to incorporate the progressbar.\n If such a string is given, ``fmt_string % progressbar`` is printed/logged.\n\n :param reset:\n\n If the progressbar should be restarted. If progressbar is called with a lower\n index than the one before, the progressbar is automatically restarted.\n\n :return:\n\n The progressbar string or None if the string has not been updated.", "id": "f4218:c0:m3"} {"signature": "def progressbar(index, total, percentage_step=, logger='', log_level=logging.INFO,reprint=True, time=True, length=, fmt_string=None, reset=False):", "body": "return _progressbar(index=index, total=total, percentage_step=percentage_step,logger=logger, log_level=log_level, reprint=reprint,time=time, length=length, fmt_string=fmt_string, reset=reset)", "docstring": "Plots a progress bar to the given `logger` for large for loops.\n\n To be used inside a for-loop at the end of the loop:\n\n .. 
code-block:: python\n\n for irun in range(42):\n my_costly_job() # Your expensive function\n progressbar(index=irun, total=42, reprint=True) # shows a growing progressbar\n\n\n There is no initialisation of the progressbar necessary before the for-loop.\n The progressbar will be reset automatically if used in another for-loop.\n\n :param index: Current index of for-loop\n :param total: Total size of for-loop\n :param percentage_step: Steps with which the bar should be plotted\n :param logger:\n\n Logger to write to - with level INFO. If string 'print' is given, the print statement is\n used. Use ``None`` if you don't want to print or log the progressbar statement.\n\n :param log_level: Log level with which to log.\n :param reprint:\n\n If no new line should be plotted but carriage return (works only for printing)\n\n :param time: If the remaining time should be estimated and displayed\n :param length: Length of the bar in `=` signs.\n :param fmt_string:\n\n A string which contains exactly one `%s` in order to incorporate the progressbar.\n If such a string is given, ``fmt_string % progressbar`` is printed/logged.\n\n :param reset:\n\n If the progressbar should be restarted. If progressbar is called with a lower\n index than the one before, the progressbar is automatically restarted.\n\n :return:\n\n The progressbar string or `None` if the string has not been updated.", "id": "f4218:m3"} {"signature": "def _get_argspec(func):", "body": "if inspect.isclass(func):func = func.__init__if not inspect.isfunction(func):return [], Falseparameters = inspect.signature(func).parametersargs = []uses_starstar = Falsefor par in parameters.values():if (par.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD orpar.kind == inspect.Parameter.KEYWORD_ONLY):args.append(par.name)elif par.kind == inspect.Parameter.VAR_KEYWORD:uses_starstar = Truereturn args, uses_starstar", "docstring": "Helper function to support both Python versions", "id": "f4218:m4"} {"signature": "def port_to_tcp(port=None):", "body": "domain_name = socket.getfqdn()try:addr_list = socket.getaddrinfo(domain_name, None)except Exception:addr_list = socket.getaddrinfo('', None)family, socktype, proto, canonname, sockaddr = addr_list[]host = convert_ipv6(sockaddr[])address = '' + hostif port is None:port = ()if not isinstance(port, int):context = zmq.Context()try:socket_ = context.socket(zmq.REP)socket_.ipv6 = is_ipv6(address)port = socket_.bind_to_random_port(address, *port)except Exception:print(''.format(address, addr_list))pypet_root_logger = logging.getLogger('')pypet_root_logger.exception(''.format(address))raisesocket_.close()context.term()return address + '' + str(port)", "docstring": "Returns local tcp address for a given `port`, automatic port if `None`", "id": "f4218:m10"} {"signature": "def merge_all_in_folder(folder, ext='',dynamic_imports=None,storage_service=None,force=False,ignore_data=(),move_data=False,delete_other_files=False,keep_info=True,keep_other_trajectory_info=True,merge_config=True,backup=True):", "body": "in_dir = os.listdir(folder)all_files = []for file in in_dir:full_file = os.path.join(folder, file)if os.path.isfile(full_file):_, extension = os.path.splitext(full_file)if extension == ext:all_files.append(full_file)all_files = sorted(all_files)trajs = []for full_file in all_files:traj = load_trajectory(index=-,storage_service=storage_service,filename=full_file,load_data=,force=force,dynamic_imports=dynamic_imports)trajs.append(traj)first_traj = 
trajs.pop()first_traj.f_merge_many(trajs,ignore_data=ignore_data,move_data=move_data,delete_other_trajectory=False,keep_info=keep_info,keep_other_trajectory_info=keep_other_trajectory_info,merge_config=merge_config,backup=backup)if delete_other_files:for file in all_files[:]:os.remove(file)return first_traj", "docstring": "Merges all files in a given folder.\n\n IMPORTANT: Does not check if there are more than 1 trajectory in a file. Always\n uses the last trajectory in file and ignores the other ones.\n\n Trajectories are merged according to the alphabetical order of the files,\n i.e. the resulting merged trajectory is found in the first file\n (according to lexicographic ordering).\n\n :param folder: folder (not recursive) where to look for files\n :param ext: only files with the given extension are used\n :param dynamic_imports: Dynamic imports for loading\n :param storage_service: storage service to use, leave `None` to use the default one\n :param force: If loading should be forced.\n :param delete_other_files: Deletes files of merged trajectories\n\n All other parameters as in `f_merge_many` of the trajectory.\n\n :return: The merged traj", "id": "f4219:m0"} {"signature": "def next(self):", "body": "while True:try:return next(self._current)except StopIteration:try:self._current = iter(self._chain.popleft())except IndexError:raise StopIteration('')", "docstring": "Returns next element from chain.\n\n More precisely, it returns the next element of the\n foremost iterator. If this iterator is empty it moves iteratively\n along the chain of available iterators to pick the new foremost one.\n\n Raises StopIteration if there are no elements left.", "id": "f4222:c1:m2"} {"signature": "def __init__(self, ndarray):", "body": "self._ndarray = ndarray", "docstring": "Creates a new hashable object encapsulating an ndarray.\n\n :param ndarray: The wrapped ndarray.", "id": "f4222:c3:m0"} {"signature": "def nested_equal(a, b):", "body": "if a is b:return Trueif a is None or b is None:return Falsea_sparse = spsp.isspmatrix(a)b_sparse = spsp.isspmatrix(b)if a_sparse != b_sparse:return Falseif a_sparse:if a.nnz == :return b.nnz == else:return not np.any((a != b).data)a_series = isinstance(a, pd.Series)b_series = isinstance(b, pd.Series)if a_series != b_series:return Falseif a_series:try:eq = (a == b).all()return eqexcept (TypeError, ValueError):if not len(a) == len(b):return Falsefor idx, itema in enumerate(a):itemb = b[idx]if not nested_equal(itema, itemb):return Falsereturn Truea_frame = isinstance(a, pd.DataFrame)b_frame = isinstance(b, pd.DataFrame)if a_frame != b_frame:return Falseif a_frame:try:if a.empty and b.empty:return Truenew_frame = a == bnew_frame = new_frame | (pd.isnull(a) & pd.isnull(b))if isinstance(new_frame, pd.DataFrame):return np.all(new_frame.as_matrix())except (ValueError, TypeError):for name in a:cola = a[name]if not name in b:return Falsecolb = b[name]if not len(cola) == len(colb):return Falsefor idx, itema in enumerate(cola):itemb = colb[idx]if not nested_equal(itema, itemb):return Falsereturn Truea_array = isinstance(a, np.ndarray)b_array = isinstance(b, np.ndarray)if a_array != b_array:return Falseif a_array:if a.shape != b.shape:return Falsereturn np.all(a == b)a_list = isinstance(a, (Sequence, list, tuple))b_list = isinstance(b, (Sequence, list, tuple))if a_list != b_list:return Falseif a_list:return all(nested_equal(x, y) for x, y in zip(a, b))a_mapping = isinstance(a, (Mapping, dict))b_mapping = isinstance(b, (Mapping, dict))if a_mapping != b_mapping:return Falseif 
a_mapping:keys_a = a.keys()if set(keys_a) != set(b.keys()):return Falsereturn all(nested_equal(a[k], b[k]) for k in keys_a)equality = NotImplementedtry:equality = a.__eq__(b)except (AttributeError, NotImplementedError, TypeError, ValueError):passif equality is NotImplemented:try:equality = b.__eq__(a)except (AttributeError, NotImplementedError, TypeError, ValueError):passif equality is NotImplemented:try:cmp = a.__cmp__(b)if cmp is not NotImplemented:equality = cmp == except (AttributeError, NotImplementedError, TypeError, ValueError):passif equality is NotImplemented:try:cmp = b.__cmp__(a)if cmp is not NotImplemented:equality = cmp == except (AttributeError, NotImplementedError, TypeError, ValueError):passif equality is not NotImplemented:try:return bool(equality)except (AttributeError, NotImplementedError, TypeError, ValueError):passattributes_a = get_all_attributes(a)attributes_b = get_all_attributes(b)if len(attributes_a) != len(attributes_b):return Falseif len(attributes_a) > :keys_a = list(attributes_a.keys())if set(keys_a) != set(attributes_b.keys()):return Falsereturn all(nested_equal(attributes_a[k], attributes_b[k]) for k in keys_a)return False", "docstring": "Compares two objects recursively by their elements.\n\n Also handles numpy arrays, pandas data and sparse matrices.\n\n First checks if the data falls into the above categories.\n If not, it is checked if a or b are some type of sequence or mapping and\n the contained elements are compared.\n If this is not the case, it is checked if a or b do provide a custom `__eq__` that\n evaluates to a single boolean value.\n If this is not the case, the attributes of a and b are compared.\n If this does not help either, normal `==` is used.\n\n Assumes hashable items are not mutable in a way that affects equality.\n Based on the suggestion from HERE_, thanks again Lauritz V. Thaulow :-)\n\n .. 
_HERE: http://stackoverflow.com/questions/18376935/best-practice-for-equality-in-python", "id": "f4224:m3"} {"signature": "def _collect_section(self, section):", "body": "kwargs = {}try:if self.parser.has_section(section):options = self.parser.options(section)for option in options:str_val = self.parser.get(section, option)val = ast.literal_eval(str_val)kwargs[option] = valreturn kwargsexcept:raise", "docstring": "Collects all settings within a section", "id": "f4226:c0:m1"} {"signature": "def load_class(full_class_string):", "body": "class_data = full_class_string.split(\"\")module_path = \"\".join(class_data[:-])class_str = class_data[-]module = importlib.import_module(module_path)return getattr(module, class_str)", "docstring": "Loads a class from a string naming the module and class name.\n\n For example:\n >>> load_class(full_class_string = 'pypet.brian.parameter.BrianParameter')\n ", "id": "f4227:m0"} {"signature": "def _prfx_getattr_(obj, item):", "body": "if item.startswith('') or item.startswith(''):return getattr(obj, item[:])raise AttributeError('' % (obj.__class__.__name__, item))", "docstring": "Replacement of __getattr__", "id": "f4228:m8"} {"signature": "def _prfx_setattr_(obj, item, value):", "body": "if item.startswith(''):return setattr(obj, item[:], value)else:return super(obj.__class__, obj).__setattr__(item, value)", "docstring": "Replacement of __setattr__", "id": "f4228:m9"} {"signature": "def deprecated(msg=''):", "body": "def wrapper(func):@functools.wraps(func)def new_func(*args, **kwargs):warning_string = \"\" % func.__name__warning_string = warning_string + '' + msgwarnings.warn(warning_string,category=DeprecationWarning,)return func(*args, **kwargs)return new_funcreturn wrapper", "docstring": "This is a decorator which can be used to mark functions\n as deprecated. It will result in a warning being emitted\n when the function is used.\n\n :param msg:\n\n Additional message added to the warning.", "id": "f4228:m1"} {"signature": "def pipeline(self, pipeline):", "body": "self._user_pipeline = Trueself._map_arguments = Falsereturn self._execute_runs(pipeline)", "docstring": "You can make *pypet* supervise your whole experiment by defining a pipeline.\n\n `pipeline` is a function that defines the entire experiment. 
From pre-processing,\n including setting up the trajectory, through defining the actual simulation runs, to\n post processing.\n\n The `pipeline` function needs to return TWO tuples with a maximum of three entries each.\n\n For example:\n\n ::\n\n return (runfunc, args, kwargs), (postproc, postproc_args, postproc_kwargs)\n\n Where `runfunc` is the actual simulation function that gets passed the trajectory\n container and potentially additional arguments `args` and keyword arguments `kwargs`.\n This will be run by your environment with all parameter combinations.\n\n `postproc` is a post processing function that handles your computed results.\n The function must accept as arguments the trajectory container, a list of\n results (list of tuples (run idx, result) ) and potentially\n additional arguments `postproc_args` and keyword arguments `postproc_kwargs`.\n\n As for :func:`~pypet.environment.Environment.f_add_postproc`, this function can\n potentially extend the trajectory.\n\n If you don't want to apply post-processing, your pipeline function can also simply\n return the run function and the arguments:\n\n ::\n\n return runfunc, args, kwargs\n\n Or\n\n ::\n\n return runfunc, args\n\n Or\n\n ::\n\n return runfunc\n\n ``return runfunc, kwargs`` does NOT work; if you don't want to pass `args` do\n ``return runfunc, (), kwargs``.\n\n Analogously, combinations like\n\n ::\n\n return (runfunc, args), (postproc,)\n\n work as well.\n\n :param pipeline:\n\n The pipeline function, taking only a single argument `traj`\n and returning all functions necessary for your experiment.\n\n :return:\n\n List of the individual results returned by `runfunc`.\n\n Returns a LIST OF TUPLES, where the first entry is the run idx and the second entry\n is the actual result. In case of multiprocessing these are not necessarily\n ordered according to their run index, but ordered according to their finishing time.\n\n Does not contain results stored in the trajectory!\n In order to access these simply interact with the trajectory object,\n potentially after calling :func:`~pypet.trajectory.Trajectory.f_update_skeleton`\n and loading all results at once with :func:`~pypet.trajectory.f_load`\n or loading manually with :func:`~pypet.trajectory.f_load_items`.\n\n Even if you use multiprocessing without a pool the results returned by\n `runfunc` still need to be pickled.\n\n Results computed from `postproc` are not returned. 
`postproc` should not\n return any results except dictionaries if the trajectory should be expanded.", "id": "f4231:c0:m16"} {"signature": "def _add_wildcard_config(self):", "body": "for idx, pair in enumerate(self._traj._wildcard_functions.items()):wildcards, wc_function = pairfor jdx, wildcard in enumerate(wildcards):config_name = ('' %(self.name, idx, jdx))if not self._traj.f_contains('' + config_name):self._traj.f_add_config(Parameter, config_name, wildcard,comment='').f_lock()if hasattr(wc_function, ''):config_name = ('' %(self.name, idx))if not self._traj.f_contains('' + config_name):self._traj.f_add_config(Parameter, config_name, wc_function.__name__,comment='').f_lock()if wc_function.__doc__:config_name = ('' %(self.name, idx))if not self._traj.f_contains('' + config_name):self._traj.f_add_config(Parameter, config_name, wc_function.__doc__,comment='').f_lock()try:source = inspect.getsource(wc_function)config_name = ('' %(self.name, idx))if not self._traj.f_contains('' + config_name):self._traj.f_add_config(Parameter, config_name, source,comment='').f_lock()except Exception:pass", "docstring": "Adds config data about the wildcard functions", "id": "f4231:c0:m33"} {"signature": "def _single_run(kwargs):", "body": "pypet_root_logger = logging.getLogger('')traj = kwargs['']runfunc = kwargs['']runargs = kwargs['']kwrunparams = kwargs['']clean_up_after_run = kwargs['']automatic_storing = kwargs['']wrap_mode = kwargs['']idx = traj.v_idxtotal_runs = len(traj)pypet_root_logger.info('''''' % (idx, total_runs))traj.f_start_run(turn_into_run=True)result = runfunc(traj, *runargs, **kwrunparams)if automatic_storing:traj.f_store()if wrap_mode == pypetconstants.WRAP_MODE_LOCAL:result = ((traj.v_idx, result),traj.f_get_run_information(traj.v_idx, copy=False),traj.v_storage_service.references)traj.v_storage_service.free_references()else:result = ((traj.v_idx, result),traj.f_get_run_information(traj.v_idx, copy=False))traj.f_finalize_run(store_meta_data=False,clean_up=clean_up_after_run)pypet_root_logger.info('''''' % (idx, total_runs))return result", "docstring": "Performs a single run of the experiment.\n\n :param kwargs: Dict of arguments\n\n traj: The trajectory containing all parameters set to the corresponding run index.\n\n runfunc: The user's job function\n\n runargs: The arguments handed to the user's job function (as *args)\n\n runkwargs: The keyword arguments handed to the user's job function (as **kwargs)\n\n clean_up_after_run: Whether to clean up after the run\n\n automatic_storing: Whether or not the data should be automatically stored\n\n result_queue: A queue object to store results into in case a pool is used, otherwise None\n\n :return:\n\n Results computed by the user's job function which are not stored into the trajectory.\n Returns a nested tuple of run index and result and run information:\n ``((traj.v_idx, result), run_information_dict)``", "id": "f4231:m11"} {"signature": "def _configure_frozen_scoop(kwargs):", "body": "def _delete_old_scoop_rev_data(old_scoop_rev):if old_scoop_rev is not None:try:elements = shared.elementsfor key in elements:var_dict = elements[key]if old_scoop_rev in var_dict:del var_dict[old_scoop_rev]logging.getLogger('').debug('''' % old_scoop_rev)except AttributeError:logging.getLogger('').error('''' % old_scoop_rev)scoop_rev = kwargs.pop('')try:old_scoop_rev = _frozen_scoop_single_run.kwargs['']configured = old_scoop_rev == scoop_revexcept (AttributeError, KeyError):old_scoop_rev = Noneconfigured = Falseif not configured:_frozen_scoop_single_run.kwargs 
= shared.getConst(scoop_rev, timeout=)frozen_kwargs = _frozen_scoop_single_run.kwargsfrozen_kwargs[''] = scoop_revfrozen_kwargs[''].v_full_copy = frozen_kwargs['']if not scoop.IS_ORIGIN:_configure_niceness(frozen_kwargs)_configure_logging(frozen_kwargs, extract=False)_delete_old_scoop_rev_data(old_scoop_rev)logging.getLogger('').info('' % str(scoop.worker))", "docstring": "Wrapper function that configures a frozen SCOOP set up.\n\n Deletes of data if necessary.", "id": "f4231:m5"} {"signature": "def _configure_pool(kwargs):", "body": "_pool_single_run.storage_service = kwargs['']_configure_niceness(kwargs)_configure_logging(kwargs, extract=False)", "docstring": "Configures the pool and keeps the storage service", "id": "f4231:m2"} {"signature": "def _get_results_from_queue(self, result_queue, results, n, total_runs):", "body": "while not result_queue.empty():result = result_queue.get()n = self._check_result_and_store_references(result, results, n, total_runs)return n", "docstring": "Extract all available results from the queue and returns the increased n", "id": "f4231:c0:m35"} {"signature": "def _check_result_and_store_references(self, result, results, n, total_runs):", "body": "if result[] == sigint_handling.SIGINT:self._stop_iteration = Trueresult = result[] if result is not None:if self._wrap_mode == pypetconstants.WRAP_MODE_LOCAL:self._multiproc_wrapper.store_references(result[])self._traj._update_run_information(result[])results.append(result[])if self._resumable:self._trigger_result_snapshot(result[:])self._show_progress(n, total_runs)n += return n", "docstring": "Checks for SIGINT and if reference wrapping and stores references.", "id": "f4231:c0:m36"} {"signature": "def _prepare_queue(self):", "body": "if self._queue is None:if self._use_manager:if self._manager is None:self._manager = multip.Manager()self._queue = self._manager.Queue(maxsize=self._queue_maxsize)else:self._queue = multip.Queue(maxsize=self._queue_maxsize)self._logger.info('')queue_handler = QueueStorageServiceWriter(self._storage_service, self._queue,self._gc_interval)self._queue_process = multip.Process(name='', target=_wrap_handling,args=(dict(handler=queue_handler,logging_manager=self._logging_manager,graceful_exit=self._graceful_exit),))self._queue_process.start()self._queue_wrapper = QueueStorageServiceSender(self._queue)self._traj.v_storage_service = self._queue_wrapper", "docstring": "Replaces the trajectory's service with a queue sender and starts the queue process.", "id": "f4231:c1:m17"} {"signature": "def __repr__(self):", "body": "repr_string = '' % (self.__class__.__name__, self.name,self.trajectory.v_name)return repr_string", "docstring": "String representation of environment", "id": "f4231:c0:m2"} {"signature": "def _trigger_result_snapshot(self, result):", "body": "timestamp = result[]['']timestamp_str = repr(timestamp).replace('', '')filename = '' % timestamp_strextension = ''dump_filename = os.path.join(self._resume_path, filename + extension)dump_file = open(dump_filename, '')dill.dump(result, dump_file, protocol=)dump_file.flush()dump_file.close()extension = ''rename_filename = os.path.join(self._resume_path, filename + extension)shutil.move(dump_filename, rename_filename)", "docstring": "Triggers a snapshot of the results for continuing\n\n :param result: Currently computed result", "id": "f4231:c0:m37"} {"signature": "@propertydef current_idx(self):", "body": "return self._current_idx", "docstring": "The current run index that is the next one to be executed.\n\n Can be set manually to make 
the environment consider old non-completed ones.", "id": "f4231:c0:m9"} {"signature": "def _configure_logging(kwargs, extract=True):", "body": "try:logging_manager = kwargs['']if extract:logging_manager.extract_replacements(kwargs[''])logging_manager.make_logging_handlers_and_tools(multiproc=True)except Exception as exc:sys.stderr.write('' % repr(exc))traceback.print_exc()", "docstring": "Requests the logging manager to configure logging.\n\n :param extract:\n\n If naming data should be extracted from the trajectory", "id": "f4231:m8"} {"signature": "def _prepare_sumatra(self):", "body": "reason = self._sumatra_reasonif reason:reason += ''if self._traj.v_comment:commentstr = '' % self._traj.v_commentelse:commentstr = ''reason += '' %(self._traj.v_name,commentstr,str(list(self._traj._explored_parameters.keys())))self._logger.info('' % reason)self._sumatra_reason = reasonself._loaded_sumatatra_project = load_project(self._sumatra_project)if self._traj.f_contains('', shortcuts=False):param_dict = self._traj.parameters.f_to_dict(fast_access=False)for param_name in list(param_dict.keys()):param = param_dict[param_name]if param.f_has_range():param_dict[param_name] = param.f_get_range()else:param_dict[param_name] = param.f_get()else:param_dict = {}relpath = os.path.relpath(sys.modules[''].__file__, self._sumatra_project)executable = PythonExecutable(path=sys.executable)self._sumatra_record = self._loaded_sumatatra_project.new_record(parameters=param_dict,main_file=relpath,executable=executable,label=self._sumatra_label,reason=reason)", "docstring": "Prepares a sumatra record", "id": "f4231:c0:m21"} {"signature": "@propertydef traj(self):", "body": "return self.trajectory", "docstring": "Equivalent to env.trajectory", "id": "f4231:c0:m8"} {"signature": "def _finish_sumatra(self):", "body": "finish_time = self._start_timestamp - self._finish_timestampself._sumatra_record.duration = finish_timeself._sumatra_record.output_data = self._sumatra_record.datastore.find_new_data(self._sumatra_record.timestamp)self._loaded_sumatatra_project.add_record(self._sumatra_record)self._loaded_sumatatra_project.save()sumatra_label = self._sumatra_record.labelconfig_name = '' % str(sumatra_label)conf_list = []if not self._traj.f_contains('' + config_name):conf1 = self._traj.f_add_config(Parameter, config_name, str(sumatra_label),comment='')conf_list.append(conf1)if self._sumatra_reason:config_name = '' % str(sumatra_label)if not self._traj.f_contains('' + config_name):conf2 = self._traj.f_add_config(Parameter, config_name,str(self._sumatra_reason),comment='')conf_list.append(conf2)if self._automatic_storing and conf_list:self._traj.f_store_items(conf_list)self._logger.info('''' % str(self._sumatra_reason))", "docstring": "Saves a sumatra record", "id": "f4231:c0:m22"} {"signature": "def _execute_runs(self, pipeline):", "body": "if self._start_timestamp is None:self._start_timestamp = time.time()if self._map_arguments and self._resumable:raise ValueError('''')if self._sumatra_project is not None:self._prepare_sumatra()if pipeline is not None:results = []self._prepare_runs(pipeline)else:results = self._prepare_resume()if self._runfunc is not None:self._traj._run_by_environment = Trueif self._graceful_exit:sigint_handling.start()try:self._inner_run_loop(results)finally:self._traj._run_by_environment = Falseself._stop_iteration = Falseif self._graceful_exit:sigint_handling.finalize()self._add_wildcard_config()if self._automatic_storing:self._logger.info('''''' 
%self._traj.v_name)self._traj.f_store()self._logger.info('''''' %self._traj.v_name)self._finish_timestamp = time.time()findatetime = datetime.datetime.fromtimestamp(self._finish_timestamp)startdatetime = datetime.datetime.fromtimestamp(self._start_timestamp)self._runtime = str(findatetime - startdatetime)conf_list = []config_name = '' % self.nameif not self._traj.f_contains('' + config_name):conf1 = self._traj.f_add_config(Parameter, config_name, self._start_timestamp,comment='''''''')conf_list.append(conf1)config_name = '' % self.nameif not self._traj.f_contains('' + config_name):conf2 = self._traj.f_add_config(Parameter, config_name, self._finish_timestamp,comment='')else:conf2 = self._traj.f_get('' + config_name)conf2.f_unlock()conf2.f_set(self._finish_timestamp)conf_list.append(conf2)config_name = '' % self.nameif not self._traj.f_contains('' + config_name):conf3 = self._traj.f_add_config(Parameter, config_name, self._runtime,comment='')else:conf3 = self._traj.f_get('' + config_name)conf3.f_unlock()conf3.f_set(self._runtime)conf_list.append(conf3)if self._automatic_storing:self._traj.f_store_items(conf_list, store_data=pypetconstants.OVERWRITE_DATA)if hasattr(self._traj.v_storage_service, ''):self._traj.v_storage_service.finalize()incomplete = []for run_name in self._traj.f_get_run_names():if not self._traj._is_completed(run_name):incomplete.append(run_name)if len(incomplete) > :self._logger.error('''' % (self._traj.v_name,''.join(incomplete)))else:self._logger.info('' %self._traj.v_name)if self._sumatra_project is not None:self._finish_sumatra()return results", "docstring": "Starts the individual single runs.\n\n Starts runs sequentially or initiates multiprocessing.\n\n :param pipeline:\n\n A pipeline function producing the run function the corresponding arguments\n and postprocessing function and arguments\n\n :return:\n\n List of tuples, where each tuple contains the run idx and the result.", "id": "f4231:c0:m32"} {"signature": "def _frozen_pool_single_run(kwargs):", "body": "idx = kwargs.pop('')frozen_kwargs = _frozen_pool_single_run.kwargsfrozen_kwargs.update(kwargs) traj = frozen_kwargs['']traj.f_set_crun(idx)return _sigint_handling_single_run(frozen_kwargs)", "docstring": "Single run wrapper for the frozen pool, makes a single run and passes kwargs", "id": "f4231:m1"} {"signature": "def __dir__(self):", "body": "result = set()result.update(dir(self.__class__), self.__all_slots__)if hasattr(self, ''):result.update(self.__dict__.keys())return list(result)", "docstring": "Includes all slots in the `dir` method", "id": "f4233:c1:m2"} {"signature": "def __setitem__(self, key, value):", "body": "self.f_set(**{key: value})", "docstring": "Almost equivalent to calling __setattr__.\n\n Treats integer values as `f_get`.", "id": "f4236:c0:m4"} {"signature": "def f_is_empty(self):", "body": "return len(self._dict) == ", "docstring": "Checks if annotations are empty", "id": "f4236:c0:m7"} {"signature": "def __getitem__(self, item):", "body": "return self.f_get(item)", "docstring": "Equivalent to calling f_get()", "id": "f4236:c0:m3"} {"signature": "def f_get(self, *args):", "body": "if len(args) == :if len(self._dict) == :return self._dict[list(self._dict.keys())[]]elif len(self._dict) > :raise ValueError('''' %(str(list(self._dict.keys()))))else:raise AttributeError('')result_list = []for name in args:name = self._translate_key(name)try:result_list.append(self._dict[name])except KeyError:raise AttributeError('' % name)if len(args) == :return result_list[]else:return tuple(result_list)", 
"docstring": "Returns annotations\n\n If len(args)>1, then returns a list of annotations.\n\n `f_get(X)` with *X* integer will return the annotation with name `annotation_X`.\n\n If the annotation contains only a single entry you can call `f_get()` without arguments.\n If you call `f_get()` and the annotation contains more than one element a ValueError is\n thrown.", "id": "f4236:c0:m13"} {"signature": "@propertydef v_annotations(self):", "body": "return self._annotations", "docstring": "Annotation feature of a trajectory node.\n\n Store some short additional information about your nodes here.\n If you use the standard HDF5 storage service, they will be stored as hdf5 node\n attributes_.\n\n .. _attributes: http://pytables.github.io/usersguide/libref/declarative_classes.html#the-attributeset-class", "id": "f4236:c1:m1"} {"signature": "def check_log_config(self):", "body": "if self.report_progress:if self.report_progress is True:self.report_progress = (, '', logging.INFO)elif isinstance(self.report_progress, (int, float)):self.report_progress = (self.report_progress, '', logging.INFO)elif isinstance(self.report_progress, str):self.report_progress = (, self.report_progress, logging.INFO)elif len(self.report_progress) == :self.report_progress = (self.report_progress[], self.report_progress[],logging.INFO)if self.log_config:if self.log_config == pypetconstants.DEFAULT_LOGGING:pypet_path = os.path.abspath(os.path.dirname(__file__))init_path = os.path.join(pypet_path, '')self.log_config = os.path.join(init_path, '')if isinstance(self.log_config, str):if not os.path.isfile(self.log_config):raise ValueError('''' % self.log_config)parser = NoInterpolationParser()parser.read(self.log_config)elif isinstance(self.log_config, cp.RawConfigParser):parser = self.log_configelse:parser = Noneif parser is not None:self._sp_config = self._parser_to_string_io(parser)self._mp_config = self._find_multiproc_options(parser)if self._mp_config is not None:self._mp_config = self._parser_to_string_io(self._mp_config)elif isinstance(self.log_config, dict):self._sp_config = self.log_configself._mp_config = self._find_multiproc_dict(self._sp_config)if self.log_stdout:if self.log_stdout is True:self.log_stdout = ('', logging.INFO)if isinstance(self.log_stdout, str):self.log_stdout = (self.log_stdout, logging.INFO)if isinstance(self.log_stdout, int):self.log_stdout = ('', self.log_stdout)", "docstring": "Checks and converts all settings if necessary passed to the Manager.\n\n Searches for multiprocessing options as well.", "id": "f4237:c1:m11"} {"signature": "def make_logging_handlers_and_tools(self, multiproc=False):", "body": "log_stdout = self.log_stdoutif sys.stdout is self._stdout_to_logger:log_stdout = Falseif self.log_config:if multiproc:proc_log_config = self._mp_configelse:proc_log_config = self._sp_configif proc_log_config:if isinstance(proc_log_config, dict):new_dict = self._handle_dict_config(proc_log_config)dictConfig(new_dict)else:parser = self._handle_config_parsing(proc_log_config)memory_file = self._parser_to_string_io(parser)fileConfig(memory_file, disable_existing_loggers=False)if log_stdout:std_name, std_level = self.log_stdoutstdout = StdoutToLogger(std_name, log_level=std_level)stdout.start()self._tools.append(stdout)", "docstring": "Creates logging handlers and redirects stdout.", "id": "f4237:c1:m14"} {"signature": "def _change_logging_kwargs(kwargs):", "body": "log_levels = kwargs.pop('', None)log_folder = kwargs.pop('', '')logger_names = kwargs.pop('', '')if log_levels is None:log_levels = 
kwargs.pop('', logging.INFO)log_multiproc = kwargs.pop('', True)if not isinstance(logger_names, (tuple, list)):logger_names = [logger_names]if not isinstance(log_levels, (tuple, list)):log_levels = [log_levels]if len(log_levels) == :log_levels = [log_levels[] for _ in logger_names]dictionary = copy.deepcopy(LOGGING_DICT)prefixes = ['']if not log_multiproc:for key in list(dictionary.keys()):if key.startswith(''):del dictionary[key]else:prefixes.append('')for prefix in prefixes:for handler_dict in dictionary[prefix + ''].values():if '' in handler_dict:filename = os.path.join(log_folder, handler_dict[''])filename = os.path.normpath(filename)handler_dict[''] = filenamedictionary[prefix + ''] = {}logger_dict = dictionary[prefix + '']for idx, logger_name in enumerate(logger_names):logger_dict[logger_name] = {'': log_levels[idx],'': list(dictionary[prefix + ''].keys())}kwargs[''] = dictionary", "docstring": "Helper function to turn the simple logging kwargs into a `log_config`.", "id": "f4237:m0"} {"signature": "def flush(self):", "body": "pass", "docstring": "No-op to fulfil API", "id": "f4237:c4:m4"} {"signature": "def finalize(self):", "body": "if self._original_steam is not None and self._redirection:sys.stdout = self._original_steamprint('')self._redirection = Falseself._original_steam = None", "docstring": "Disables redirection", "id": "f4237:c4:m5"} {"signature": "def simple_logging_config(func):", "body": "@functools.wraps(func)def new_func(self, *args, **kwargs):if use_simple_logging(kwargs):if '' in kwargs:raise ValueError('''''''')_change_logging_kwargs(kwargs)return func(self, *args, **kwargs)return new_func", "docstring": "Decorator to allow a simple logging configuration.\n\n This encompasses giving a `log_folder`, `logger_names` as well as `log_levels`.", "id": "f4237:m2"} {"signature": "def show_progress(self, n, total_runs):", "body": "if self.report_progress:percentage, logger_name, log_level = self.report_progressif logger_name == '':logger = ''else:logger = logging.getLogger(logger_name)if n == -:digits = int(math.log10(total_runs + )) + self._format_string = '' + '' % digits + ''fmt_string = self._format_string % (n + , total_runs) + ''reprint = log_level == progressbar(n, total_runs, percentage_step=percentage,logger=logger, log_level=log_level,fmt_string=fmt_string, reprint=reprint)", "docstring": "Displays a progressbar", "id": "f4237:c1:m3"} {"signature": "@staticmethoddef _parser_to_string_io(parser):", "body": "memory_file = StringIO()parser.write(memory_file)memory_file.flush()memory_file.seek()return memory_file", "docstring": "Turns a ConfigParser into a StringIO stream.", "id": "f4237:c1:m8"} {"signature": "@propertydef v_standard_result(self):", "body": "return self._standard_result", "docstring": "The standard result class used for result creation", "id": "f4238:c0:m92"} {"signature": "@v_idx.setter@not_in_rundef v_idx(self, idx):", "body": "self.f_set_crun(idx)", "docstring": "Changes the index to make the trajectory behave as a single run", "id": "f4238:c0:m15"} {"signature": "@propertydef v_with_links(self):", "body": "return self._with_links", "docstring": "Whether links should be considered in case using natural naming\n or squared bracket indexing", "id": "f4238:c0:m21"} {"signature": "def _merge_links(self, other_trajectory, used_runs, allowed_translations, ignore_data):", "body": "linked_items = other_trajectory._linked_byrun_name_dummys = set([f(-) for f in other_trajectory._wildcard_functions.values()])if len(linked_items) > :self._logger.info('')for 
old_linked_name in other_trajectory._linked_by:if old_linked_name in ignore_data:continuesplit_name = old_linked_name.split('')if any(x in run_name_dummys for x in split_name):self._logger.warning('''' %(old_linked_name, str(run_name_dummys)))continueold_link_dict = other_trajectory._linked_by[old_linked_name]split_name = old_linked_name.split('')if all(x in allowed_translations for x in split_name):new_linked_full_name = self._rename_full_name(old_linked_name,other_trajectory,used_runs=used_runs)else:new_linked_full_name = old_linked_namefor linking_node, link_set in old_link_dict.values():linking_full_name = linking_node.v_full_namesplit_name = linking_full_name .split('')if any(x in run_name_dummys for x in split_name):self._logger.warning('''''' %(linking_full_name, str(run_name_dummys)))split_name = linking_full_name .split('')if any(x in allowed_translations for x in split_name):new_linking_full_name = self._rename_full_name(linking_full_name,other_trajectory,used_runs=used_runs)else:new_linking_full_name = linking_full_namefor link in link_set:if (linking_full_name + '' + link) in ignore_data:continueif link in run_name_dummys:self._logger.warning('''''' %(link,linking_full_name,str(run_name_dummys)))continuetry:new_linked_item = self.f_get(new_linked_full_name,shortcuts=False)if self.f_contains(new_linking_full_name):new_linking_item = self.f_get(new_linking_full_name,shortcuts=False)else:new_linking_item = self.f_add_group(new_linking_full_name)if link in allowed_translations:run_indices, wildcards = other_trajectory._reversed_wildcards[link]link = self.f_wildcard(wildcards[], used_runs[run_indices[]])if not link in new_linking_item._links:new_linking_item.f_add_link(link, new_linked_item)else:self._logger.debug('' %(link, new_linked_item.v_full_name))except (AttributeError, ValueError) as exc:self._logger.error('''' %(link, linking_full_name, old_linked_name,repr(exc)))", "docstring": "Merges all links", "id": "f4238:c0:m58"} {"signature": "@propertydef v_comment(self):", "body": "return self._comment", "docstring": "Should be a nice descriptive comment", "id": "f4238:c0:m9"} {"signature": "def _preset(self, name, args, kwargs):", "body": "if self.f_contains(name, shortcuts=False):raise ValueError('''' % name)else:self._changed_default_parameters[name] = (args, kwargs)", "docstring": "Generic preset function, marks a parameter or config for presetting.", "id": "f4238:c0:m28"} {"signature": "def __copy__(self):", "body": "return self.f_copy(copy_leaves=True,with_links=True)", "docstring": "Returns a shallow copy", "id": "f4238:c0:m39"} {"signature": "@v_comment.setter@not_in_rundef v_comment(self, comment):", "body": "comment = str(comment)self._comment = comment", "docstring": "Sets the comment", "id": "f4238:c0:m10"} {"signature": "def _check_if_both_have_same_parameters(self, other_trajectory,ignore_data, consecutive_merge):", "body": "if not isinstance(other_trajectory, Trajectory):raise TypeError('''' % str(type(other_trajectory)))if self._stored and not consecutive_merge:self.f_load_skeleton()if other_trajectory._stored:other_trajectory.f_load_skeleton()other_wildcard_set = set(x[] for x in other_trajectory._wildcard_functions.keys())wildcard_set = set(x[] for x in self._wildcard_functions.keys())diff = wildcard_set.symmetric_difference(other_wildcard_set)if diff:raise TypeError('' %(str(wildcard_set), str(other_wildcard_set)))if self._stored:with self._nn_interface._disable_logging:self.f_load_items(self._parameters.keys(), only_empties=True)if other_trajectory._stored:with 
self._nn_interface._disable_logging:other_trajectory.f_load_items(other_trajectory._parameters.keys(),only_empties=True)self.f_restore_default()other_trajectory.f_restore_default()allmyparams = self._parameters.copy()allotherparams = other_trajectory._parameters.copy()if '' in self:my_traj_dpars = self._derived_parametersif self._stored:with self._nn_interface._disable_logging:self.f_load_items(my_traj_dpars.keys(), only_empties=True)allmyparams.update(my_traj_dpars)other_traj_dpars = other_trajectory._derived_parametersif other_trajectory._stored:with self._nn_interface._disable_logging:other_trajectory.f_load_items(other_traj_dpars.keys(), only_empties=True)allotherparams.update(other_traj_dpars)my_keyset = set(allmyparams.keys())other_keyset = set(allotherparams.keys())diff = my_keyset.symmetric_difference(other_keyset) - ignore_datarun_dummys = (self.f_wildcard('', -), other_trajectory.f_wildcard('', -))if diff:run_difference_can_be_resolved = Truefor full_name in diff:split_name = full_name.split('')if not any(x in self._run_information orx in other_trajectory._run_information orx in run_dummysfor x in split_name):run_difference_can_be_resolved = Falsebreakelif full_name in allotherparams:del allotherparams[full_name]if not run_difference_can_be_resolved:raise TypeError('''''''' % str(diff))for key, other_param in allotherparams.items():if key in ignore_data:continuemy_param = self.f_get(key)split_key = key.split('')if any(x in self._run_information orx in other_trajectory._run_informationfor x in split_key):passelse:if not my_param._values_of_same_type(my_param.f_get(), other_param.f_get()):raise TypeError('''' %(key, str(type(my_param.f_get())),str(type(other_param.f_get()))))", "docstring": "Checks if two trajectories live in the same space and can be merged.", "id": "f4238:c0:m50"} {"signature": "def f_get_wildcards(self):", "body": "return list(self._wildcard_keys.keys())", "docstring": "Returns a list of all defined wildcards", "id": "f4238:c0:m1"} {"signature": "@not_in_rundef f_restore_default(self):", "body": "self._idx = -self._crun = Nonefor param in self._explored_parameters.values():if param is not None:param._restore_default()", "docstring": "Restores the default value in all explored parameters and sets the\n v_idx property back to -1 and v_crun to None.", "id": "f4238:c0:m66"} {"signature": "@not_in_rundef f_shrink(self, force=False):", "body": "if self._stored and not force:raise TypeError('''')for param in self._explored_parameters.values():param.f_unlock()try:param._shrink()except Exception as exc:self._logger.error('' %(param.v_full_name, repr(exc)))self._explored_parameters = {}self._run_information = {}self._single_run_ids = {}self._add_run_info()self._test_run_addition()", "docstring": "Shrinks the trajectory and removes all exploration ranges from the parameters.\n Only possible if the trajectory has not been stored to disk before or was loaded as new.\n\n :param force:\n\n Usually you cannot shrink the trajectory if it has been stored to disk,\n because there's no guarantee that it is actually shrunk if there\n still exist explored parameters on disk. 
In case you are certain that\n you did not store explored parameters to disk set or you deleted all\n of them from disk set `force=True`.\n\n :raises: TypeError if the trajectory was stored before.", "id": "f4238:c0:m27"} {"signature": "def f_delete_link(self, link, remove_from_trajectory=False):", "body": "self.f_delete_links((link,), remove_from_trajectory)", "docstring": "Deletes a single link see :func:`~pypet.trajectory.Trajectory.f_delete_links`", "id": "f4238:c0:m118"} {"signature": "def _merge_parameters(self, other_trajectory, remove_duplicates=False,trial_parameter_name=None,ignore_data=()):", "body": "if trial_parameter_name:if remove_duplicates:self._logger.warning('''''')remove_duplicates = Falseparams_to_change = {}if trial_parameter_name:my_trial_parameter = self.f_get(trial_parameter_name)other_trial_parameter = other_trajectory.f_get(trial_parameter_name)if not isinstance(my_trial_parameter, BaseParameter):raise TypeError('''' % trial_parameter_name)if my_trial_parameter.f_has_range():my_trial_list = my_trial_parameter.f_get_range(copy=False)else:my_trial_list = [my_trial_parameter.f_get()]if other_trial_parameter.f_has_range():other_trial_list = other_trial_parameter.f_get_range(copy=False)else:other_trial_list = [other_trial_parameter.f_get()]mytrialset = set(my_trial_list)mymaxtrial_T1 = max(mytrialset) if mytrialset != set(range(mymaxtrial_T1 + )):raise TypeError('''''' % (mymaxtrial_T1, str(mytrialset)))othertrialset = set(other_trial_list)othermaxtrial_T2 = max(othertrialset) if othertrialset != set(range(othermaxtrial_T2 + )):raise TypeError('''''' %(othermaxtrial_T2, str(othertrialset)))trial_parameter_name = my_trial_parameter.v_full_nameif not trial_parameter_name in self._explored_parameters:self._explored_parameters[trial_parameter_name] = my_trial_parameterparams_to_change[trial_parameter_name] = (my_trial_parameter, other_trial_parameter)params_to_merge = other_trajectory._parameters.copy()params_to_merge.update(other_trajectory._derived_parameters)for ignore in ignore_data:if ignore in params_to_merge:del params_to_merge[ignore]run_name_dummys = set([f(-) for f in other_trajectory._wildcard_functions.values()])for key in params_to_merge:other_param = params_to_merge[key]split_key = key.split('')if any(x in other_trajectory._reversed_wildcards for x in split_key):continuemy_param = self.f_get(key)if not my_param._values_of_same_type(my_param.f_get(), other_param.f_get()):raise TypeError('''' % key)if my_param.v_full_name == trial_parameter_name:continueif (my_param.f_has_range() orother_param.f_has_range() ornot my_param._equal_values(my_param.f_get(), other_param.f_get())):params_to_change[key] = (my_param, other_param)if not my_param.f_has_range() and not other_param.f_has_range():remove_duplicates = Falseused_runs = {}for idx in range(len(other_trajectory)):used_runs[idx] = idxif remove_duplicates:for irun in range(len(other_trajectory)):for jrun in range(len(self)):change = Truefor my_param, other_param in params_to_change.values():if other_param.f_has_range():other_param._set_parameter_access(irun)if my_param.f_has_range():my_param._set_parameter_access(jrun)val1 = my_param.f_get()val2 = other_param.f_get()if not my_param._equal_values(val1, val2):change = Falsebreakif change:del used_runs[irun]breakfor my_param, other_param in params_to_change.values():other_param._restore_default()my_param._restore_default()adding_length = len(used_runs)starting_length = len(self)if adding_length == :return used_runs, []count = for key in 
sorted(used_runs.keys()):used_runs[key] = starting_length + countcount += for my_param, other_param in params_to_change.values():fullname = my_param.v_full_nameif fullname == trial_parameter_name:other_range = [x + mymaxtrial_T1 + for x in other_trial_list]else:if other_param.f_has_range():other_range = (x for jdx, x in enumerate(other_param.f_get_range(copy=False))if jdx in used_runs)else:other_range = (other_param.f_get() for _ in range(adding_length))if not my_param.f_has_range():my_param.f_unlock()my_param._explore((my_param.f_get() for _ in range(len(self))))my_param.f_unlock()my_param._expand(other_range)if not fullname in self._explored_parameters:self._explored_parameters[fullname] = my_paramreturn used_runs, list(params_to_change.keys())", "docstring": "Merges parameters from the other trajectory into the current one.\n\n The explored parameters in the current trajectory are directly enlarged (in RAM),\n no storage service is needed here. Later on in `f_merge` the storage service\n will be requested to store the enlarged parameters to disk.\n\n Note that explored parameters are always enlarged. Unexplored parameters might become\n new explored parameters if they differ in their default values\n in the current and the other trajectory, respectively.\n\n :return: A tuple with two elements:\n\n 1.\n\n Dictionary of run index mappings from the old trajectory to the new one.\n\n 2.\n\n List of names of parameters that were altered.", "id": "f4238:c0:m62"} {"signature": "def f_store_items(self, iterator, *args, **kwargs):", "body": "if not self._stored:raise TypeError('''')fetched_items = self._nn_interface._fetch_items(STORE, iterator, args, kwargs)if fetched_items:self._storage_service.store(pypetconstants.LIST, fetched_items,trajectory_name=self.v_name)else:raise ValueError('''')", "docstring": "Stores individual items to disk.\n\n This function is useful if you calculated very large results (or large derived parameters)\n during runtime and you want to write these to disk immediately and empty them afterwards\n to free some memory.\n\n Instead of storing individual parameters or results you can also store whole subtrees with\n :func:`~pypet.naturalnaming.NNGroupNode.f_store_child`.\n\n\n You can pass the following arguments to `f_store_items`:\n\n :param iterator:\n\n An iterable containing the parameters or results to store, either their\n names or the instances. 
You can also pass group instances or names here\n to store the annotations of the groups.\n\n :param non_empties:\n\n Optional keyword argument (boolean),\n if `True` will only store the subset of provided items that are not empty.\n Empty parameters or results found in `iterator` are simply ignored.\n\n :param args: Additional arguments passed to the storage service\n\n :param kwargs:\n\n If you use the standard hdf5 storage service, you can pass the following additional\n keyword argument:\n\n :param overwrite:\n\n List names of parts of your item that should\n be erased and overwritten by the new data in your leaf.\n You can also set `overwrite=True`\n to overwrite all parts.\n\n For instance:\n\n >>> traj.f_add_result('mygroup.myresult', partA=42, partB=44, partC=46)\n >>> traj.f_store()\n >>> traj.mygroup.myresult.partA = 333\n >>> traj.mygroup.myresult.partB = 'I am going to change to a string'\n >>> traj.f_store_item('mygroup.myresult', overwrite=['partA', 'partB'])\n\n Will store `'mygroup.myresult'` to disk again and overwrite the parts\n `'partA'` and `'partB'` with the new values `333` and\n `'I am going to change to a string'`.\n The data stored as `partC` is not changed.\n\n Be aware that you need to specify the names of parts as they were stored\n to HDF5. Depending on how your leaf construction works, this may differ\n from the names the data might have in your leaf in the trajectory container.\n\n Note that massive overwriting will fragment and blow up your HDF5 file.\n Try to avoid changing data on disk whenever you can.\n\n :raises:\n\n TypeError:\n\n If the (parent) trajectory has never been stored to disk. In this case\n use :func:`pypet.trajectory.f_store` first.\n\n ValueError: If no item could be found to be stored.\n\n Note if you use the standard hdf5 storage service, there are no additional arguments\n or keyword arguments to pass!", "id": "f4238:c0:m113"} {"signature": "@propertydef v_storage_service(self):", "body": "return self._storage_service", "docstring": "The service that can store the trajectory to disk or wherever.\n\n Default is None or if a filename was provided on construction\n the :class:`~pypet.storageservice.HDF5StorageService`.", "id": "f4238:c0:m11"} {"signature": "@propertydef v_standard_parameter(self):", "body": "return self._standard_parameter", "docstring": "The standard parameter used for parameter creation", "id": "f4238:c0:m90"} {"signature": "def f_add_wildcard_functions(self, func_dict):", "body": "for wildcards, function in func_dict.items():if not isinstance(wildcards, tuple):wildcards = (wildcards,)for wildcard in wildcards:if wildcard in self._wildcard_keys:raise ValueError('' % wildcard)self._wildcard_keys[wildcard] = wildcardsself._wildcard_functions[wildcards] = functionself._logger.debug('' % str(wildcards))", "docstring": "TODO", "id": "f4238:c0:m3"} {"signature": "def _make_reversed_wildcards(self, old_length=-):", "body": "if len(self._reversed_wildcards) > :start = old_lengthelse:start = -for wildcards, func in self._wildcard_functions.items():for irun in range(start, len(self)):translated_name = func(irun)if not translated_name in self._reversed_wildcards:self._reversed_wildcards[translated_name] = ([], wildcards)self._reversed_wildcards[translated_name][].append(irun)", "docstring": "Creates a full mapping from all wildcard translations to the corresponding wildcards", "id": "f4238:c0:m52"} {"signature": "def _set_start(self):", "body": "init_time = time.time()formatted_time = 
datetime.datetime.fromtimestamp(init_time).strftime('')run_info_dict = self._run_information[self.v_crun]run_info_dict[''] = init_timerun_info_dict[''] = formatted_timeif self._environment_hexsha is not None:run_info_dict[''] = self._environment_hexsha[:]", "docstring": "Sets the start timestamp and formatted time to the current time.", "id": "f4238:c0:m75"} {"signature": "@not_in_rundef f_explore(self, build_dict):", "body": "for run_idx in range(len(self)):if self.f_is_completed(run_idx):raise TypeError('''')added_explored_parameters = []try:length = len(self)for key, builditerable in build_dict.items():act_param = self.f_get(key)if not act_param.v_is_leaf or not act_param.v_is_parameter:raise ValueError('' % key)act_param.f_unlock()act_param._explore(builditerable)added_explored_parameters.append(act_param)full_name = act_param.v_full_nameself._explored_parameters[full_name] = act_paramact_param._explored = Trueif len(self._explored_parameters) == :length = act_param.f_get_range_length()elif not length == act_param.f_get_range_length():raise ValueError('')for irun in range(length):self._add_run_info(irun)self._test_run_addition(length)except Exception:for param in added_explored_parameters:param.f_unlock()param._shrink()param._explored = Falsefull_name = param.v_full_namedel self._explored_parameters[full_name]if len(self._explored_parameters) == :self.f_shrink(force=True)raise", "docstring": "Prepares the trajectory to explore the parameter space.\n\n\n To explore the parameter space you need to provide a dictionary with the names of the\n parameters to explore as keys and iterables specifying the exploration ranges as values.\n\n All iterables need to have the same length, otherwise a ValueError is raised.\n A ValueError is also raised if the names from the dictionary map to groups or results\n and not parameters.\n\n If your trajectory is already explored but not stored yet and your parameters are\n not locked, you can add new explored parameters to the current ones if their\n iterables match the current length of the trajectory.\n\n Raises an AttributeError if the names from the dictionary are not found at all in\n the trajectory and NotUniqueNodeError if the keys do not unambiguously map\n to single parameters.\n\n Raises a TypeError if the trajectory has been stored already; please use\n :func:`~pypet.trajectory.Trajectory.f_expand` then instead.\n\n Example usage:\n\n >>> traj.f_explore({'groupA.param1' : [1,2,3,4,5], 'groupA.param2':['a','b','c','d','e']})\n\n Could also be called consecutively:\n\n >>> traj.f_explore({'groupA.param1' : [1,2,3,4,5]})\n >>> traj.f_explore({'groupA.param2':['a','b','c','d','e']})\n\n NOTE:\n\n Since parameters are very conservative regarding the data they accept\n (see :ref:`type_conservation`), you sometimes won't be able to use Numpy arrays\n for exploration as iterables.\n\n For instance, the following code snippet won't work:\n\n ::\n\n import numpy as np\n from pypet.trajectory import Trajectory\n traj = Trajectory()\n traj.f_add_parameter('my_float_parameter', 42.4,\n comment='My value is a standard python float')\n\n traj.f_explore( { 'my_float_parameter': np.arange(42.0, 44.876, 0.23) } )\n\n\n This will result in a `TypeError` because your exploration iterable\n `np.arange(42.0, 44.876, 0.23)` contains `numpy.float64` values\n whereas your parameter is supposed to use standard python floats.\n\n Yet, you can use Numpy's `tolist()` function to overcome this problem:\n\n ::\n\n traj.f_explore( { 'my_float_parameter': np.arange(42.0, 44.876, 
0.23).tolist() } )\n\n\n Or you could specify your parameter directly as a numpy float:\n\n ::\n\n traj.f_add_parameter('my_float_parameter', np.float64(42.4),\n comment='My value is a numpy 64 bit float')", "id": "f4238:c0:m42"} {"signature": "def f_find_idx(self, name_list, predicate):", "body": "if self._is_run and not self.v_full_copy:raise TypeError('''')if isinstance(name_list, str):name_list = [name_list]iter_list = []for name in name_list:param = self.f_get(name)if not param.v_is_parameter:raise TypeError('' %(name, str(type(param))))if param.f_has_range():iter_list.append(iter(param.f_get_range(copy=False)))else:iter_list.append(itools.repeat(param.f_get(), len(self)))logic_iter = map(predicate, *iter_list)for idx, item in enumerate(logic_iter):if item:yield idx", "docstring": "Finds a single run index given a particular condition on parameters.\n\n ONLY useful for a single run if ``v_full_copy` was set to ``True``.\n Otherwise a TypeError is thrown.\n\n :param name_list:\n\n A list of parameter names the predicate applies to, if you have only a single\n parameter name you can omit the list brackets.\n\n :param predicate:\n\n A lambda predicate for filtering that evaluates to either ``True`` or ``False``\n\n :return: A generator yielding the matching single run indices\n\n Example:\n\n >>> predicate = lambda param1, param2: param1==4 and param2 in [1.0, 2.0]\n >>> iterator = traj.f_find_idx(['groupA.param1', 'groupA.param2'], predicate)\n >>> [x for x in iterator]\n [0, 2, 17, 36]", "id": "f4238:c0:m71"} {"signature": "@propertydef v_timestamp(self):", "body": "return self._timestamp", "docstring": "Float timestamp of creation time", "id": "f4238:c0:m88"} {"signature": "@v_no_clobber.setterdef v_no_clobber(self, value):", "body": "self._no_clobber = bool(value)", "docstring": "Sets no_clobber", "id": "f4238:c0:m97"} {"signature": "@kwargs_api_change('')@kwargs_api_change('')@kwargs_mutual_exclusive('', '', lambda x: not x)def f_store(self, only_init=False, store_data=pypetconstants.STORE_DATA,max_depth=None):", "body": "if self._is_run:if self._new_nodes or self._new_links:self._storage_service.store(pypetconstants.SINGLE_RUN, self,trajectory_name=self.v_name,recursive=not only_init,store_data=store_data,max_depth=max_depth)else:self._storage_service.store(pypetconstants.TRAJECTORY, self,trajectory_name=self.v_name,only_init=only_init,store_data=store_data,max_depth=max_depth)self._stored = True", "docstring": "Stores the trajectory to disk and recursively all data in the tree.\n\n :param only_init:\n\n If you just want to initialise the store. If yes, only meta information about\n the trajectory is stored and none of the groups/leaves within the trajectory.\n Alternatively, you can pass `recursive=False`.\n\n :param store_data:\n\n Only considered if ``only_init=False``. Choose of the following:\n\n * :const:`pypet.pypetconstants.STORE_NOTHING`: (0)\n\n Nothing is store.\n\n * :const:`pypet.pypetconstants.STORE_DATA_SKIPPING`: (1)\n\n Speedy version of normal ``STORE_DATA`` will entirely skip groups\n (but not their children) and leaves if they have been stored before.\n No new data is added in this case.\n\n * :const:`pypet.pypetconstants.STORE_DATA`: (2)\n\n Stores every group and leave node. If they contain data that is not yet stored\n to disk it is added.\n\n * :const:`pypet.pypetconstants.OVERWRITE_DATA`: (3)\n\n Stores all groups and leave nodes and will delete all data on disk\n and overwrite it with the current data in RAM.\n\n **NOT RECOMMENDED**! 
Overwriting data on disk fragments the HDF5 file and\n yields badly compressed large files. Better stick to the concept\n write once and read many!\n\n\n If you use the HDF5 Storage Service usually (STORE_DATA (2)) only novel data\n is stored to disk.\n If you have results that have been stored to disk before only new data items are added and\n already present data is NOT overwritten.\n\n Overwriting (OVERWRITE_DATA (3)) existing data with the HDF5 storage service\n is not recommended due to fragmentation of the HDF5 file. Better stick to the concept\n write once, but read often.\n\n If you want to store individual parameters or results, you might want to\n take a look at :func:`~pypet.Trajectory.f_store_items`.\n To store whole subtrees of your trajectory check out\n :func:`~pypet.naturalnaming.NNGroupNode.f_store_child`.\n Note both functions require that your trajectory was stored to disk with `f_store`\n at least once before.\n\n\n **ATTENTION**: Calling `f_store` during a single run the behavior is different.\n\n To avoid re-storing the full trajectory in every single run, which is redundant,\n only sub-trees of the trajectory are really stored.\n\n The storage serivce looks for new data that is added below groups called `run_XXXXXXXXXX`\n and stores it where `XXXXXXXXX` is the index of this run. The `only_init` parameter is\n ignored in this case. You can avoid this behavior by using the argument from below.\n\n :param max_depth:\n\n Maximum depth to store tree (inclusive). During single runs `max_depth` is also counted\n from root.", "id": "f4238:c0:m64"} {"signature": "@not_in_run@kwargs_api_change('', '')@kwargs_api_change('', '')@kwargs_api_change('')@kwargs_api_change('')def f_merge(self, other_trajectory, trial_parameter=None, remove_duplicates=False,ignore_data=(),backup=True,move_data=False,delete_other_trajectory=False,keep_info=True,keep_other_trajectory_info=True,merge_config=True,consecutive_merge=False,slow_merge=False):", "body": "if consecutive_merge and trial_parameter is not None:self._logger.warning('''')if consecutive_merge and backup:self._logger.warning('''')timestamp = time.time()original_ignore_data = set(ignore_data)ignore_data = original_ignore_data.copy()old_len = len(self)self._check_if_both_have_same_parameters(other_trajectory, ignore_data, consecutive_merge)self._make_reversed_wildcards(old_length=old_len)other_trajectory._make_reversed_wildcards()if backup:other_trajectory.f_backup()self.f_backup()self._logger.info('')used_runs, changed_parameters = self._merge_parameters(other_trajectory,remove_duplicates,trial_parameter,ignore_data)ignore_data.update(set(changed_parameters))if len(used_runs) == :raise ValueError('''')rename_dict = {}self._logger.info('')allowed_translations = set([translation for translation, pair inother_trajectory._reversed_wildcards.items() ifany(x in used_runs for x in pair[])])self._merge_single_runs(other_trajectory, used_runs)self._logger.info('')self._merge_derived_parameters(other_trajectory=other_trajectory,used_runs=used_runs,rename_dict=rename_dict,allowed_translations=allowed_translations,ignore_data=ignore_data)self._merge_results(other_trajectory=other_trajectory,used_runs=used_runs,rename_dict=rename_dict,allowed_translations=allowed_translations,ignore_data=ignore_data)self._logger.info('')self._logger.info('')self._storage_service.store(pypetconstants.PREPARE_MERGE, self,trajectory_name=self.v_name,changed_parameters=changed_parameters,old_length=old_len)if not slow_merge:try:try:other_filename = 
other_trajectory.v_storage_service.filenameexcept AttributeError:self._logger.warning('''')other_filename = Noneself._storage_service.store(pypetconstants.MERGE, '', trajectory_name=self.v_name,other_trajectory_name=other_trajectory.v_name,rename_dict=rename_dict, move_nodes=move_data,delete_trajectory=delete_other_trajectory,other_filename=other_filename)except pex.NoSuchServiceError as exc:self._logger.exception('''''''''')slow_merge = Trueexcept ValueError as exc:self._logger.exception('''''''''')slow_merge = Trueif slow_merge:self._merge_slowly(other_trajectory, rename_dict)if merge_config:self._merge_config(other_trajectory)self._merge_links(other_trajectory=other_trajectory,used_runs=used_runs,allowed_translations=allowed_translations,ignore_data=original_ignore_data)self._logger.info('')formatted_time = datetime.datetime.fromtimestamp(timestamp).strftime('')hexsha = hashlib.sha1((self.v_name +str(self.v_timestamp) +other_trajectory.v_name +str(other_trajectory.v_timestamp) +VERSION).encode('')).hexdigest()short_hexsha = hexsha[:]if keep_info:merge_name = '' % (short_hexsha, formatted_time)config_name = '' % merge_nameself.f_add_config(config_name, len(used_runs),comment='')config_name = '' % merge_nameself.f_add_config(config_name, timestamp,comment='')config_name = '' % merge_nameself.f_add_config(config_name, hexsha,comment='')config_name = '' % merge_nameself.f_add_config(config_name, remove_duplicates,comment='')if original_ignore_data:config_name = '' % merge_nameself.f_add_config(config_name, tuple(original_ignore_data),comment='')config_name = '' % merge_nameself.f_add_config(config_name, len(self),comment='')self.config.merge.v_comment = ''if self.v_version != VERSION:config_name = '' % merge_nameself.f_add_config(config_name, self.v_version,comment='''')if trial_parameter is not None:config_name = '' % merge_nameself.f_add_config(config_name, len(other_trajectory),comment='')if keep_other_trajectory_info:if other_trajectory.v_version != self.v_version:config_name = '' % merge_nameself.f_add_config(config_name, other_trajectory.v_version,comment='''''')config_name = '' % merge_nameself.f_add_config(config_name, other_trajectory.v_name,comment='')config_name = '' % merge_nameself.f_add_config(config_name, other_trajectory.v_timestamp,comment='''')config_name = '' % merge_nameself.f_add_config(config_name, len(other_trajectory),comment='')if other_trajectory.v_comment:config_name = '' % merge_nameself.f_add_config(config_name, other_trajectory.v_comment,comment='')if not consecutive_merge:self._logger.info('')self.f_store(store_data=pypetconstants.STORE_DATA)self._reversed_wildcards = {}other_trajectory._reversed_wildcards = {}self._logger.info('')", "docstring": "Merges another trajectory into the current trajectory.\n\n Both trajectories must live in the same space. This means both need to have the same\n parameters with similar types of values.\n\n Note that links are also merged. There are exceptions: Links found under\n a generic run group called `run_ALL` or links linking to a node under such a\n group are NOT merged and simply skipped, because there is no straightforward\n way to resolve the link.\n\n :param other_trajectory: Other trajectory instance to merge into the current one.\n\n :param trial_parameter:\n\n If you have a particular parameter that specifies only the trial\n number, i.e. an integer parameter running form 0 to T1 and\n 0 to T2, the parameter is modified such that after merging it will\n cover the range 0 to T1+T2+1. 
T1 is the number of individual trials in the current\n trajectory and T2 number of trials in the other trajectory.\n\n :param remove_duplicates:\n\n Whether you want to remove duplicate parameter points.\n Requires N1 * N2 (quadratic complexity in single runs).\n A ValueError is raised if no runs would be merged.\n\n :param ignore_data:\n\n List of full names of data that should be ignored and not merged.\n\n :param backup:\n\n If ``True``, backs up both trajectories into files chosen automatically\n by the storage services. If you want to customize your backup use\n the `f_backup` function instead.\n\n :param move_data:\n\n Tells the storage service to move data from one trajectory to the other\n instead of copying it.\n\n If you use the HDF5 storage service and both trajectories are\n stored in the same file, merging is performed fast directly within\n the file. You can choose if you want to copy nodes ('move_nodes=False`)\n from the other trajectory to the current one, or if you want to move them.\n Accordingly, the stored data is no longer accessible in the other trajectory.\n\n :param delete_other_trajectory:\n\n If you want to delete the other trajectory after merging.\n\n :param keep_info:\n\n If `True`, information about the merge is added to the trajectory `config` tree under\n `config.merge`.\n\n :param merge_config:\n\n Whether or not to merge all config parameters under `.config.git`,\n `.config.environment`, and `.config.merge` of the other trajectory\n into the current one.\n\n :param keep_other_trajectory_info:\n\n Whether to keep information like length, name, etc. of the other trajectory\n in case you want to keep all the information. Setting of `keep_other_trajectory_info`\n is irrelevant in case `keep_info=False`.\n\n :param consecutive_merge:\n Can be set to `True` if you are about to merge several trajectories into the current\n one within a loop to avoid quadratic complexity.\n But remember to store your trajectory manually after\n all merges. Also make sure that all parameters and derived parameters are available\n in your current trajectory and load them before the consecutive merging.\n Also avoid specifying a `trial_parameter` and set `backup=False`\n to avoid quadratic complexity in case of consecutive merges.\n\n :param slow_merge:\n Enforces a slow merging. This means all data is loaded one after the other to\n memory and stored to disk. Otherwise it is tried to directly copy the data\n from one file into another without explicitly loading the data.\n\n If you cannot directly merge trajectories within one HDF5 file, a slow merging process\n is used. Results are loaded, stored, and emptied again one after the other. Might take\n some time!\n\n Annotations of parameters and derived parameters under `.derived_parameters.trajectory`\n are NOT merged. If you wish to extract the annotations of these parameters you have to\n do that manually before merging. 
Note that annotations of results and derived parameters\n of single runs are copied, so you don't have to worry about these.", "id": "f4238:c0:m54"} {"signature": "@propertydef v_crun_(self):", "body": "return self.v_crun if self.v_crun is not None else self.f_wildcard('', -)", "docstring": "Similar to ``v_crun`` but returns ``'run_ALL'`` if ``v_crun`` is ``None``.", "id": "f4238:c0:m16"} {"signature": "def _remove_exploration(self):", "body": "for param in self._explored_parameters.values():if param._stored:try:self.f_delete_item(param)except Exception:self._logger.exception('''' % param.v_full_name)", "docstring": "Called if trajectory is expanded, deletes all explored parameters from disk", "id": "f4238:c0:m38"} {"signature": "@not_in_rundef f_preset_parameter(self, param_name, *args, **kwargs):", "body": "if not param_name.startswith(''):param_name = '' + param_nameself._preset(param_name, args, kwargs)", "docstring": "Presets parameter value before a parameter is added.\n\n Can be called before parameters are added to the Trajectory in order to change the\n values that are stored into the parameter on creation.\n\n After creation of a parameter, the instance of the parameter is called\n with `param.f_set(*args,**kwargs)` with `*args`, and `**kwargs` provided by the user\n with `f_preset_parameter`.\n\n Before an experiment is carried out it is checked if all parameters that were\n marked were also preset.\n\n :param param_name:\n\n The full name (!) of the parameter that is to be changed after its creation.\n\n :param args:\n\n Arguments that will be used for changing the parameter's data\n\n :param kwargs:\n\n Keyword arguments that will be used for changing the parameter's data\n\n Example:\n\n >>> traj.f_preset_parameter('groupA.param1', data=44)\n >>> traj.f_add_parameter('groupA.param1', data=11)\n >>> traj.parameters.groupA.param1\n 44", "id": "f4238:c0:m30"} {"signature": "def _merge_derived_parameters(self,other_trajectory,used_runs,rename_dict,allowed_translations,ignore_data):", "body": "other_derived_parameters = other_trajectory._derived_parameters.copy()new_first_run_idx = min(used_runs.values())run_name_dummy = other_trajectory.f_wildcard('', -)for param_name in other_derived_parameters:if param_name in ignore_data:continuesplit_name = param_name.split('')if not any(x in run_name_dummy for x in split_name):continueignore_data.add(param_name)param = other_derived_parameters[param_name]new_param_name = self._rename_full_name(param_name, other_trajectory,used_runs=used_runs)if new_param_name in self:my_param = self.f_get(new_param_name, fast_access=False)if (my_param._equal_values(my_param.f_get(), param.f_get()) andnot (my_param.f_has_range() or param.f_has_range())):continuefirst_new_param_name = self._rename_full_name(param_name,other_trajectory,new_run_idx=new_first_run_idx)rename_dict[param_name] = first_new_param_namecomment = param.v_commentparam_type = param.f_get_class_name()param_type = self._create_class(param_type)first_param = self.f_add_leaf(param_type,first_new_param_name,comment=comment)for run_idx in used_runs.values():if run_idx == new_first_run_idx:continuenext_name = self._rename_full_name(param_name, other_trajectory,new_run_idx=run_idx)split_name = next_name.split('')link_name = split_name.pop()location_name = ''.join(split_name)if not self.f_contains(location_name, shortcuts=False):the_group = self.f_add_group(location_name)else:the_group = self.f_get(location_name)the_group.f_add_link(link_name, first_param)for param_name in 
other_derived_parameters:if param_name in ignore_data:continuesplit_name = param_name.split('')ignore_data.add(param_name)if any(x in other_trajectory._reversed_wildcards and x not in allowed_translationsfor x in split_name):continuenew_name = self._rename_full_name(param_name, other_trajectory,used_runs=used_runs)if self.f_contains(new_name):my_param = self.f_get(new_name, fast_access=False)param = other_derived_parameters[param_name]if (my_param._equal_values(my_param.f_get(), param.f_get()) andnot (my_param.f_has_range() or param.f_has_range())):continueelse:self._logger.error('''' % new_name)rename_dict[param_name] = new_name", "docstring": "Merges derived parameters that have the `run_ALL` in a name.\n\n Creates a new parameter with the name of the first new run and links to this\n parameter to avoid copying in all other runs.", "id": "f4238:c0:m57"} {"signature": "@v_crun.setter@not_in_rundef v_crun(self, run_name):", "body": "self.f_set_crun(run_name)", "docstring": "Changes the run name to make the trajectory behave as during a single run", "id": "f4238:c0:m18"} {"signature": "@propertydef v_python(self):", "body": "return self._python", "docstring": "The version of python as a string that was used to create the trajectory", "id": "f4238:c0:m8"} {"signature": "@not_in_rundef f_iter_runs(self, start=, stop=None, step=, yields=''):", "body": "if stop is None:stop =len(self)elif stop > len(self):raise ValueError('')yields = yields.lower()if yields == '':yield_func = lambda x: self.f_idx_to_run(x)elif yields == '':yield_func = lambda x: xelif yields == '':yield_func = lambda x: selfelif yields == '':yield_func = lambda x: self.__copy__()else:raise ValueError('')for idx in range(start, stop, step):self.f_set_crun(idx)yield yield_func(idx)self.f_set_crun(None)", "docstring": "Makes the trajectory iterate over all runs.\n\n :param start: Start index of run\n\n :param stop: Stop index, leave ``None`` for length of trajectory\n\n :param step: Stepsize\n\n :param yields:\n\n What should be yielded: ``'name'`` of run, ``idx`` of run\n or ``'self'`` to simply return the trajectory container.\n\n You can also pick ``'copy'`` to get **shallow** copies (i.e. the tree is copied but\n no leave nodes except explored ones) of your trajectory,\n which might lead to some overhead.\n\n Note that after a full iteration, the trajectory is set back to normal.\n\n Thus, the following code snippet\n\n ::\n\n for run_name in traj.f_iter_runs():\n\n # Do some stuff here...\n\n\n is equivalent to\n\n ::\n\n for run_name in traj.f_get_run_names(sort=True):\n traj.f_set_crun(run_name)\n\n # Do some stuff here...\n\n traj.f_set_crun(None)\n\n\n :return:\n\n Iterator over runs. The iterator itself will return the run names but modify\n the trajectory in each iteration and set it back to normal in the end.", "id": "f4238:c0:m26"} {"signature": "def f_start_run(self, run_name_or_idx=None, turn_into_run=True):", "body": "if self._run_started:return selfif run_name_or_idx is None:if self.v_idx == -:raise ValueError('')else:self.f_set_crun(run_name_or_idx)self._run_started = Trueif turn_into_run:self._make_single_run()self._set_start()return self", "docstring": "Can be used to manually allow running of an experiment without using an environment.\n\n :param run_name_or_idx:\n\n Can manually set a trajectory to a particular run. If `None` the current run\n the trajectory is set to is used.\n\n :param turn_into_run:\n\n Turns the trajectory into a run, i.e. 
reduces functionality but makes storing\n more efficient.", "id": "f4238:c0:m73"} {"signature": "def _update_run_information(self, run_information_dict):", "body": "idx = run_information_dict['']name = run_information_dict['']self._run_information[name] = run_information_dictself._updated_run_information.add(idx)", "docstring": "Overwrites the run information of a particular run", "id": "f4238:c0:m43"} {"signature": "def _merge_slowly(self, other_trajectory, rename_dict):", "body": "for other_key in rename_dict:new_key = rename_dict[other_key]other_instance = other_trajectory.f_get(other_key)if other_instance.f_is_empty():with self._nn_interface._disable_logging:other_trajectory.f_load_item(other_instance)if not self.f_contains(new_key):class_name = other_instance.f_get_class_name()class_ = self._create_class(class_name)my_instance = self.f_add_leaf(class_, new_key)else:my_instance = self.f_get(new_key, shortcuts=False)if not my_instance.f_is_empty():raise RuntimeError('' % new_key)load_dict = other_instance._store()my_instance._load(load_dict)my_instance.f_set_annotations(**other_instance.v_annotations.f_to_dict(copy=False))my_instance.v_comment = other_instance.v_commentself.f_store_item(my_instance)if other_instance.v_is_parameter:other_instance.f_unlock()my_instance.f_unlock()other_instance.f_empty()my_instance.f_empty()", "docstring": "Merges trajectories by loading iteratively items of the other trajectory and\n store it into the current trajectory.\n\n :param rename_dict:\n\n Dictionary containing mappings from the old result names in the `other_trajectory`\n to the new names in the current trajectory.", "id": "f4238:c0:m60"} {"signature": "def __dir__(self):", "body": "result = super(Trajectory, self).__dir__()if self._is_run:result = [x for x in result if not x.startswith('') ornot getattr(getattr(self, x), '', False)]if not is_debug():result.extend(self._children.keys())return result", "docstring": "Adds all children to auto-completion\n\n In case of a single run it spares all non-available functions", "id": "f4238:c0:m127"} {"signature": "@v_standard_leaf.setterdef v_standard_leaf(self, leaf):", "body": "self._standard_leaf = leaf", "docstring": "Sets standard result", "id": "f4238:c0:m95"} {"signature": "def f_idx_to_run(self, name_or_idx):", "body": "return self._single_run_ids[name_or_idx]", "docstring": "Converts an integer idx to the corresponding single run name and vice versa.\n\n Note during a single run ONLY useful if ``v_full_copy`` was set to True.\n\n :param name_or_idx: Name of a single run or an integer index\n\n :return: The corresponding idx or name of the single run\n\n Example usage:\n\n >>> traj.f_idx_to_run(4)\n 'run_00000004'\n >>> traj.f_idx_to_run('run_00000000')\n 0", "id": "f4238:c0:m72"} {"signature": "@propertydef v_standard_leaf(self):", "body": "return self._standard_leaf", "docstring": "The standard constructor used if you add a generic leaf.\n\n The constructor is only used if you do not add items under the usual four subtrees\n (`parameters`, `derived_parameters`, `config`, `results`).", "id": "f4238:c0:m94"} {"signature": "def _store(self):", "body": "if self._data is not None:store_dict = {'': ObjectTable(data={'': [self._data]})}if self.f_has_range():store_dict[''] = ObjectTable(data={'': self._explored_range})self._locked = Truereturn store_dict", "docstring": "Returns a dictionary of formatted data understood by the storage service.\n\n The data is put into an :class:`~pypet.parameter.ObjectTable` named 'data'.\n If the parameter is explored, 
the exploration range is also put into another table\n named 'explored_data'.\n\n :return: Dictionary containing the data and optionally the exploration range.", "id": "f4239:c2:m17"} {"signature": "def _build_names_old(self, name_idx, is_dia):", "body": "name_list = self._get_name_list(is_dia)return tuple(['' % (SparseParameter.IDENTIFIER, name,SparseParameter.IDENTIFIER, name_idx)for name in name_list])", "docstring": "ONLY for backwards compatibility", "id": "f4239:c4:m8"} {"signature": "def f_supports(self, data):", "body": "dtype = type(data)if dtype is tuple or dtype is list and len(data) == :return True elif dtype is np.ndarray and data.size == and data.ndim == :return True else:return super(ArrayParameter, self).f_supports(data)", "docstring": "Checks if input data is supported by the parameter.", "id": "f4239:c3:m4"} {"signature": "def f_supports(self, data):", "body": "dtype = type(data)if dtype is tuple or dtype is list:if len(data) == :return Falseold_type = Nonefor item in data:if not type(item) in pypetconstants.PARAMETER_SUPPORTED_DATA:return Falseif not old_type is None and old_type != type(item):return Falseold_type = type(item)return Trueelif dtype is np.ndarray or dtype is np.matrix:if data.size == :return False dtype = data.dtypeif np.issubdtype(dtype, np.str):dtype = np.strreturn dtype in pypetconstants.PARAMETER_SUPPORTED_DATA", "docstring": "Checks if input data is supported by the parameter.", "id": "f4239:c2:m9"} {"signature": "def f_supports(self, data):", "body": "return type(data) in pypetconstants.PARAMETER_SUPPORTED_DATA", "docstring": "Checks whether the data is supported by the parameter.", "id": "f4239:c1:m1"} {"signature": "def f_set(self, *args, **kwargs):", "body": "if args and self.v_name is None:raise AttributeError('')for idx, arg in enumerate(args):valstr = self.f_translate_key(idx)self.f_set_single(valstr, arg)for key, arg in kwargs.items():self.f_set_single(key, arg)", "docstring": "Method to put data into the result.\n\n :param args:\n\n The first positional argument is stored with the name of the result.\n Following arguments are stored with `name_X` where `X` is the position\n of the argument.\n\n :param kwargs: Arguments are stored with the key as name.\n\n :raises: TypeError if outer data structure is not understood.\n\n Example usage:\n\n >>> res = Result('supergroup.subgroup.myresult', comment='I am a neat example!')\n >>> res.f_set(333,42.0, mystring='String!')\n >>> res.f_get('myresult')\n 333\n >>> res.f_get('myresult_1')\n 42.0\n >>> res.f_get(1)\n 42.0\n >>> res.f_get('mystring')\n 'String!'", "id": "f4239:c7:m11"} {"signature": "def _load(self, load_dict):", "body": "for key in list(load_dict.keys()):if key in load_dict:if SparseResult.IDENTIFIER in key:new_key = key.split(SparseResult.IDENTIFIER)[]is_dia = load_dict.pop(new_key + SparseResult.IDENTIFIER + '')name_list = SparseParameter._get_name_list(is_dia)rename_list = ['' % (new_key, SparseResult.IDENTIFIER, name)for name in name_list]data_list = [load_dict.pop(name) for name in rename_list]matrix = SparseParameter._reconstruct_matrix(data_list)self._data[new_key] = matrixelse:self._data[key] = load_dict[key]", "docstring": "Loads data from `load_dict`\n\n Reconstruction of sparse matrices similar to the :class:`~pypet.parameter.SparseParameter`.", "id": "f4239:c8:m3"} {"signature": "def _load(self, load_dict):", "body": "if self.v_locked:raise pex.ParameterLockedException('' % self.v_full_name)if '' in load_dict:dump = load_dict['']self._data = 
pickle.loads(dump)else:self._logger.warning('''' % self.v_full_name)try:self.v_protocol = load_dict[PickleParameter.PROTOCOL]except KeyError:self.v_protocol = PickleParameter._get_protocol(dump)if '' in load_dict:explore_table = load_dict['']name_col = explore_table['']explore_list = []for name_id in name_col:arrayname = self._build_name(name_id)loaded = pickle.loads(load_dict[arrayname])explore_list.append(loaded)self._explored_range = explore_listself._explored = Trueself._default = self._dataself._locked = True", "docstring": "Reconstructs objects from the pickle dumps in `load_dict`.\n\n The 'explored_data' entry in `load_dict` is used to reconstruct\n the exploration range in the correct order.\n\n Sets the `v_protocol` property to the protocol used to store 'data'.", "id": "f4239:c5:m7"} {"signature": "def _restore_default(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Restores original data if changed due to exploration.\n\n If a Parameter is explored, the actual data is changed over the course of different\n simulations. This method restores the original data assigned before exploration.\n\n ABSTRACT: Needs to be defined in subclass", "id": "f4239:c1:m8"} {"signature": "def f_supports_fast_access(self):", "body": "return not self.f_is_empty()", "docstring": "Checks if parameter supports fast access.\n\n A parameter supports fast access if it is NOT empty!", "id": "f4239:c1:m3"} {"signature": "@v_protocol.setterdef v_protocol(self, value):", "body": "self._protocol = value", "docstring": "Sets the protocol", "id": "f4239:c9:m2"} {"signature": "def f_unlock(self):", "body": "self._locked = False", "docstring": "Unlocks the locked parameter.\n\n Please use it very carefully, or best do not use this function at all.\n There should better be no reason to unlock a locked parameter!\n The only exception I can think of is to unlock a large derived parameter\n after usage to subsequently call :func:`~pypet.parameter.BaseParameter.f_empty`\n to clear memory.", "id": "f4239:c1:m15"} {"signature": "def _expand(self, iterable):", "body": "raise NotImplementedError(\"\")", "docstring": "Similar to :func:`~pypet.parameter.BaseParameter._explore` but appends to\n the exploration range.\n\n :param iterable: An iterable specifying the exploration range.\n\n :raises:\n\n ParameterLockedException: If the parameter is locked\n\n TypeError: If the parameter did not have a range before\n\n Example usage:\n\n >>> param = Parameter('groupA.groupB.myparam', data=3.13, comment='I am a neat example')\n >>> param._explore([3.0,2.0,1.0])\n >>> param._expand([42.0,43.0])\n >>> param.f_get_range()\n (3.0,2.0,1.0,42.0,43.0)\n\n ABSTRACT: Needs to be defined in subclass", "id": "f4239:c1:m23"} {"signature": "def f_has_range(self):", "body": "return len(self._explored_range) > ", "docstring": "If the parameter has a range.\n\n Does not have to be `True` if the parameter is explored.\n The range might be removed during pickling to save memory.\n Accordingly, `v_explored` remains `True` whereas `f_has_range` is `False`.", "id": "f4239:c2:m4"} {"signature": "def __dir__(self):", "body": "result = super(Result, self).__dir__()if self._data_ is not None:result.extend(self._data.keys())return result", "docstring": "Adds all data to auto-completion", "id": "f4239:c7:m2"} {"signature": "@v_protocol.setterdef v_protocol(self, value):", "body": "self._protocol = value", "docstring": "Sets the protocol", "id": "f4239:c5:m2"} {"signature": "def _data_sanity_checks(self, explore_iterable):", "body": "data_list 
= []for val in explore_iterable:if not self.f_supports(val):raise TypeError('' % (repr(val), str(type(val))))if not self._values_of_same_type(val, self._default):raise TypeError('''' %(self.v_full_name, str(type(val)), str(type(self._default))))data_list.append(val)if len(data_list) == :raise ValueError('')return data_list", "docstring": "Checks if data values are valid.\n\n Checks if the data values are supported by the parameter and if the values are of the same\n type as the default value.", "id": "f4239:c2:m16"} {"signature": "def __iter__(self):", "body": "return self._data.__iter__()", "docstring": "Equivalent to iterating over the keys of the data dictionary.", "id": "f4239:c7:m14"} {"signature": "@propertydef v_locked(self):", "body": "return self._locked", "docstring": "Whether or not the parameter is locked and prevents further modification", "id": "f4239:c1:m2"} {"signature": "def _explore(self, explore_iterable):", "body": "if self.v_locked:raise pex.ParameterLockedException('' % self.v_full_name)if self.f_has_range():raise TypeError('''' % self._name)if self._data is None:raise TypeError('''' % self.v_full_name)data_list = self._data_sanity_checks(explore_iterable)self._explored_range = data_listself._explored = Trueself.f_lock()", "docstring": "Explores the parameter according to the iterable.\n\n Raises ParameterLockedException if the parameter is locked.\n Raises TypeError if the parameter does not support the data,\n the types of the data in the iterable are not the same as the type of the default value,\n or the parameter has already an exploration range.\n\n Note that the parameter will iterate over the whole iterable once and store\n the individual data values into a tuple. Thus, the whole exploration range is\n explicitly stored in memory.\n\n :param explore_iterable: An iterable specifying the exploration range\n\n For example:\n\n >>> param._explore([3.0,2.0,1.0])\n >>> param.f_get_range()\n (3.0, 2.0, 1.0)\n\n :raises TypeError,ParameterLockedException", "id": "f4239:c2:m14"} {"signature": "def _values_of_same_type(self, val1, val2):", "body": "if self.f_supports(val1) != self.f_supports(val2):return Falseif not self.f_supports(val1) and not self.f_supports(val2):raise TypeError('''' %str(type(val1)), str(type(val2)))return type(val1) is type(val2)", "docstring": "Checks if two values agree in type.\n\n For example, two 32 bit integers would be of same type, but not a string and an integer,\n nor a 64 bit and a 32 bit integer.\n\n This is important for exploration. You are only allowed to explore data that\n is of the same type as the default value.\n\n One could always come up with a trivial solution of `type(val1) is type(val2)`.\n But sometimes your parameter does want even more strict equality or\n less type equality.\n\n For example, the :class:`~pypet.parameter.Parameter` has a stricter sense of\n type equality regarding numpy arrays. In order to have two numpy arrays of the same type,\n they must also agree in shape. However, the :class:`~pypet.parameter.ArrayParameter`,\n considers all numpy arrays as of being of same type regardless of their shape.\n\n Moreover, the :class:`~pypet.parameter.SparseParameter` considers all supported\n sparse matrices (csc, csr, bsr, dia) as being of the same type. 
You can make\n explorations using all these four types at once.\n\n The difference in how strict types are treated arises from the way parameter data\n is stored to disk and how the parameters hand over their data to the storage service\n (see :func:`pypet.parameter.BaseParameter._store`).\n\n The :class:`~pypet.parameter.Parameter` puts all its data in an\n :class:`~pypet.parameter.ObjectTable` which\n has strict constraints on the column sizes. This means that numpy array columns only\n accept numpy arrays with a particular size. In contrast, the array and sparse\n parameter hand over their data as individual items which yield individual entries\n in the hdf5 node. In order to see what I mean simply run an experiment with all 3\n parameters, explore all of them, and take a look at the resulting hdf5 file!\n\n However, this BaseParameter class implements the straightforward version of\n `type(val1) is type(val2)` to consider data to be of the same type.\n\n :raises: TypeError: if both values are not supported by the parameter.", "id": "f4239:c1:m12"} {"signature": "def f_empty(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Erases all data in the parameter.\n\n Does not erase data from disk. So if the parameter has\n been stored with a service to disk and is emptied,\n it can be restored by loading from disk.\n\n :raises: ParameterLockedException: If the parameter is locked.\n\n ABSTRACT: Needs to be defined in subclass", "id": "f4239:c1:m28"} {"signature": "@staticmethoddef _is_supported_matrix(data):", "body": "return (spsp.isspmatrix_csc(data) orspsp.isspmatrix_csr(data) orspsp.isspmatrix_bsr(data) orspsp.isspmatrix_dia(data))", "docstring": "Checks if data is a csr, csc, bsr, or dia Scipy sparse matrix", "id": "f4239:c4:m2"} {"signature": "def __getattr__(self, item):", "body": "if item == '':return self.f_get()elif item == '':return self.f_get_default()else:raise AttributeError('' % (self.f_get_class_name(),item))", "docstring": "Allows to query for `.data` as an attribute", "id": "f4239:c2:m6"} {"signature": "def f_get_range(self, copy=True):", "body": "raise NotImplementedError(\"\")", "docstring": "Returns an iterable to iterate over the values of the exploration range.\n\n Note that the returned values should be a copy of the exploration range\n unless explicitly requested otherwise.\n\n :param copy:\n\n If range should be copied to avoid tampering with data.\n\n :return: Iterable\n\n :raises: TypeError if the parameter is not explored\n\n Example usage:\n\n >>> param = Parameter('groupA.groupB.myparam',data=22, comment='I am a neat example')\n >>> param._explore([42,43,43])\n >>> param.f_get_range()\n (42,43,44)\n\n ABSTRACT: Needs to be defined in subclass", "id": "f4239:c1:m21"} {"signature": "@propertydef v_protocol(self):", "body": "return self._protocol", "docstring": "The protocol used to pickle data, default is 0.\n\n See pickle_ documentation for the protocols.\n\n .. 
_pickle: http://docs.python.org/2/library/pickle.html", "id": "f4239:c5:m1"} {"signature": "def euler_scheme(traj, diff_func):", "body": "steps = traj.stepsinitial_conditions = traj.initial_conditionsdimension = len(initial_conditions)result_array = np.zeros((steps,dimension))func_params_dict = traj.func_params.f_to_dict(short_names=True, fast_access=True)result_array[] = initial_conditionsfor idx in range(,steps):result_array[idx] = diff_func(result_array[idx-], **func_params_dict) * traj.dt +result_array[idx-]traj.f_add_result('', data=result_array, comment='')", "docstring": "Simulation function for Euler integration.\n\n :param traj:\n\n Container for parameters and results\n\n :param diff_func:\n\n The differential equation we want to integrate", "id": "f4243:m0"} {"signature": "def diff_lorenz(value_array, sigma, beta, rho):", "body": "diff_array = np.zeros()diff_array[] = sigma * (value_array[]-value_array[])diff_array[] = value_array[] * (rho - value_array[]) - value_array[]diff_array[] = value_array[] * value_array[] - beta * value_array[]return diff_array", "docstring": "The Lorenz attractor differential equation\n\n :param value_array: 3d array containing the x,y, and z component values.\n :param sigma: Constant attractor parameter\n :param beta: Constant attractor parameter\n :param rho: Constant attractor parameter\n\n :return: 3d array of the Lorenz system evaluated at `value_array`", "id": "f4243:m2"} {"signature": "def manipulate_multiproc_safe(traj):", "body": "traj.last_process_name = mp.current_process().nametraj.results.f_store(store_data=)", "docstring": "Target function that manipulates the trajectory.\n\n Stores the current name of the process into the trajectory and\n **overwrites** previous settings.\n\n :param traj:\n\n Trajectory container with multiprocessing safe storage service", "id": "f4245:m0"} {"signature": "def _make_folder(self, traj):", "body": "print_folder = os.path.join(traj.analysis.plot_folder,traj.v_name, traj.v_crun)print_folder = os.path.abspath(print_folder)if not os.path.isdir(print_folder):os.makedirs(print_folder)return print_folder", "docstring": "Makes a subfolder for plots.\n\n :return: Path name to print folder", "id": "f4249:c4:m3"} {"signature": "def _add_monitors(self, traj, network, network_dict):", "body": "neurons_e = network_dict['']monitor_list = []self.spike_monitor = SpikeMonitor(neurons_e)monitor_list.append(self.spike_monitor)self.V_monitor = StateMonitor(neurons_e,'',record=list(traj.neuron_records))monitor_list.append(self.V_monitor)self.I_syn_e_monitor = StateMonitor(neurons_e, '',record=list(traj.neuron_records))monitor_list.append(self.I_syn_e_monitor)self.I_syn_i_monitor = StateMonitor(neurons_e, '',record=list(traj.neuron_records))monitor_list.append(self.I_syn_i_monitor)network.add(*monitor_list)network_dict[''] = monitor_list", "docstring": "Adds monitors to the network", "id": "f4249:c4:m2"} {"signature": "def analyse(self, traj, network, current_subrun, subrun_list, network_dict):", "body": "if len(subrun_list)==:spikes_e = traj.results.monitors.spikes_etime_window = traj.parameters.analysis.statistics.time_windowstart_time = traj.parameters.simulation.durations.initial_runend_time = start_time+traj.parameters.simulation.durations.measurement_runneuron_ids = traj.parameters.analysis.statistics.neuron_idsmean_ff = self._compute_mean_fano_factor(neuron_ids, spikes_e, time_window, start_time, end_time)traj.f_add_result('', mean_ff, comment='''''')print('' % (traj.R_ee, mean_ff))", "docstring": "Calculates average 
Fano Factor of a network.\n\n :param traj:\n\n Trajectory container\n\n Expects:\n\n `results.monitors.spikes_e`: Data from SpikeMonitor for excitatory neurons\n\n Adds:\n\n `results.statistics.mean_fano_factor`: Average Fano Factor\n\n :param network:\n\n The BRIAN network\n\n :param current_subrun:\n\n BrianParameter\n\n :param subrun_list:\n\n Upcoming subruns, analysis is only performed if subruns is empty,\n aka the final subrun has finished.\n\n :param network_dict:\n\n Dictionary of items shared among components", "id": "f4249:c3:m3"} {"signature": "@staticmethoddef _build_model_eqs(traj):", "body": "model_eqs = traj.model.eqspost_eqs={}for name_post in ['','']:variables_dict ={}new_model_eqs=model_eqs.replace('', name_post)for name_pre in ['', '']:conn_eqs = traj.model.synaptic.eqsnew_conn_eqs = conn_eqs.replace('', name_pre)new_model_eqs += new_conn_eqstau1 = traj.model.synaptic['']tau2 = traj.model.synaptic[''+name_pre]normalization = (tau1-tau2) / tau2invtau1=/tau1invtau2 = /tau2variables_dict[''+name_pre] = invtau1variables_dict[''+name_pre] = invtau2variables_dict[''+name_pre] = normalizationvariables_dict[''+name_pre] = tau1variables_dict[''+name_pre] = tau2variables_dict[''+name_post] = traj.model[''+name_post]post_eqs[name_post] = Equations(new_model_eqs, **variables_dict)return post_eqs", "docstring": "Computes model equations for the excitatory and inhibitory population.\n\n Equation objects are created by fusing `model.eqs` and `model.synaptic.eqs`\n and replacing `PRE` by `i` (for inhibitory) or `e` (for excitatory) depending\n on the type of population.\n\n :return: Dictionary with 'i' equation object for inhibitory neurons and 'e' for excitatory", "id": "f4249:c0:m1"} {"signature": "@staticmethoddef _compute_fano_factor(spike_res, neuron_id, time_window, start_time, end_time):", "body": "assert(end_time >= start_time+time_window)bins = (end_time-start_time)/time_windowbins = int(np.floor(bins))binned_spikes = np.zeros(bins)spike_array_neuron = spike_res.t[spike_res.i==neuron_id]for bin in range(bins):lower_time = start_time+time_window*binupper_time = start_time+time_window*(bin+)spike_array_interval = spike_array_neuron[spike_array_neuron >= lower_time]spike_array_interval = spike_array_interval[spike_array_interval < upper_time]spikes = len(spike_array_interval)binned_spikes[bin]=spikesvar = np.var(binned_spikes)avg = np.mean(binned_spikes)if avg > :return var/float(avg)else:return ", "docstring": "Computes Fano Factor for one neuron.\n\n :param spike_res:\n\n Result containing the spiketimes of all neurons\n\n :param neuron_id:\n\n Index of neuron for which FF is computed\n\n :param time_window:\n\n Length of the consecutive time windows to compute the FF\n\n :param start_time:\n\n Start time of measurement to consider\n\n :param end_time:\n\n End time of measurement to consider\n\n :return:\n\n Fano Factor (float) or\n returns 0 if mean firing activity is 0.", "id": "f4249:c3:m1"} {"signature": "def _build_connections(self, traj, brian_list, network_dict):", "body": "connections = traj.connectionsneurons_i = network_dict['']neurons_e = network_dict['']print('')self.conn_ii = Synapses(neurons_i,neurons_i, on_pre='' % connections.J_ii)self.conn_ii.connect('', p=connections.p_ii)print('')self.conn_ei = Synapses(neurons_i,neurons_e, on_pre='' % connections.J_ei)self.conn_ei.connect('', p=connections.p_ei)print('')self.conn_ie = Synapses(neurons_e,neurons_i, on_pre='' % connections.J_ie)self.conn_ie.connect('', p=connections.p_ie)conns_list = [self.conn_ii, 
self.conn_ei, self.conn_ie]if connections.R_ee > :cluster_list=[]cluster_conns_list=[]model=traj.modelclusters = int(model.N_e/connections.clustersize_e)traj.f_add_derived_parameter('', clusters, comment='')p_out = (connections.p_ee*model.N_e) /(connections.R_ee*connections.clustersize_e+model.N_e- connections.clustersize_e)p_in = p_out * connections.R_eetraj.f_add_derived_parameter('', p_in ,comment='')traj.f_add_derived_parameter('', p_out ,comment='')low_index = high_index = connections.clustersize_efor irun in range(clusters):cluster = neurons_e[low_index:high_index]print('' % (irun, clusters))conn = Synapses(cluster,cluster,on_pre='' % (connections.J_ee*connections.strength_factor))conn.connect('', p=p_in)cluster_conns_list.append(conn)if low_index > :rest_low = neurons_e[:low_index]print('')low_conn = Synapses(cluster,rest_low,on_pre='' % connections.J_ee)low_conn.connect('', p=p_out)cluster_conns_list.append(low_conn)if high_index < model.N_e:rest_high = neurons_e[high_index:model.N_e]print('')high_conn = Synapses(cluster,rest_high,on_pre='' % connections.J_ee)high_conn.connect('', p=p_out)cluster_conns_list.append(high_conn)low_index=high_indexhigh_index+=connections.clustersize_eself.cluster_conns=cluster_conns_listconns_list+=cluster_conns_listelse:print('')self.conn_ee = Synapses(neurons_e,neurons_e,on_pre='' % connections.J_ee)self.conn_ee.connect('', p=connections.p_ee)conns_list.append(self.conn_ee)brian_list.extend(conns_list)network_dict[''] = conns_list", "docstring": "Connects neuron groups `neurons_i` and `neurons_e`.\n\n Adds all connections to `brian_list` and adds a list of connections\n with the key 'connections' to the `network_dict`.", "id": "f4249:c1:m3"} {"signature": "def multiply(traj):", "body": "z=traj.x*traj.ytraj.f_add_result('',z=z, comment='')", "docstring": "Sophisticated simulation of multiplication", "id": "f4250:m0"} {"signature": "def multiply(traj):", "body": "z=traj.x*traj.ytraj.f_add_result('',z, comment='')", "docstring": "Sophisticated simulation of multiplication", "id": "f4251:m0"} {"signature": "def make_filename(traj):", "body": "explored_parameters = traj.f_get_explored_parameters()filename = ''for param in explored_parameters.values():short_name = param.v_nameval = param.f_get()filename += '' % (short_name, str(val))return filename[:-] + ''", "docstring": "Function to create generic filenames based on what has been explored", "id": "f4253:m0"} {"signature": "def main():", "body": "logger = logging.getLogger()folder = os.path.join(os.getcwd(), '', '')if not os.path.isdir(folder):os.makedirs(folder)filename = os.path.join(folder, '')env = Environment(trajectory='',multiproc=True,ncores=,wrap_mode='',filename=filename,overwrite_file=True)traj = env.trajtraj.par.ncells = Parameter('', , '')traj.par.steps = Parameter('', , '')traj.par.rule_number = Parameter('', , '')traj.par.initial_name = Parameter('', '', '')traj.par.seed = Parameter('', , '')exp_dict = {'' : [, , , , ],'' : ['', ''],}exp_dict = cartesian_product(exp_dict)traj.f_explore(exp_dict)logger.info('')env.run(wrap_automaton)traj.f_load(load_data=)logger.info('')for idx, run_name in enumerate(traj.f_iter_runs()):filename = os.path.join(folder, make_filename(traj))plot_pattern(traj.crun.pattern, traj.rule_number, filename)progressbar(idx, len(traj), logger=logger)env.disable_logging()", "docstring": "Main *boilerplate* function to start simulation", "id": "f4253:m2"} {"signature": "def eval_wrapper(the_tuple):", "body": "return eval_one_max(*the_tuple)", "docstring": "Wrapper function 
that unpacks a single tuple as arguments to the fitness function.\n\n The pool's map function only allows a single iterable so we need to zip it first\n and then unpack it here.", "id": "f4256:m1"} {"signature": "@manual_run(store_meta_data=True) def eval_one_max(traj, individual):", "body": "traj.f_add_result('', list(individual))fitness = sum(individual)traj.f_add_result('', fitness)traj.f_store()return (fitness,)", "docstring": "The fitness function", "id": "f4256:m0"} {"signature": "def add_parameters(traj):", "body": "print('')traj.f_add_parameter('', ,comment='''')traj.f_add_parameter('', ,comment='')traj.f_add_parameter('', ,comment='')traj.f_add_parameter('', ,comment='''''')traj.f_add_parameter('', ,comment='''')traj.f_add_parameter('', ,comment='')", "docstring": "Adds all parameters to `traj`", "id": "f4259:m2"} {"signature": "def mypipeline(traj):", "body": "add_parameters(traj)add_exploration(traj)return (run_neuron,(),{}), (neuron_postproc,(),{})", "docstring": "A pipeline function that defines the entire experiment\n\n :param traj:\n\n Container for results and parameters\n\n :return:\n\n Two tuples. First tuple contains the actual run function plus additional\n arguments (yet we have none). Second tuple contains the\n postprocessing function including additional arguments.", "id": "f4260:m0"} {"signature": "def multiply(traj):", "body": "z=traj.x * traj.ytraj.f_add_result('', z, comment='')", "docstring": "Sophisticated simulation of multiplication", "id": "f4264:m0"} {"signature": "def multiply(traj):", "body": "z = traj.x * traj.ytraj.f_add_result('', z, comment='')", "docstring": "Example of a sophisticated simulation that involves multiplying two values.\n\n :param traj:\n\n Trajectory containing\n the parameters in a particular combination,\n it also serves as a container for results.", "id": "f4265:m0"} {"signature": "def multiply(traj):", "body": "z=traj.x*traj.ytraj.f_add_result('',z=z, comment='',)", "docstring": "Sophisticated simulation of multiplication", "id": "f4268:m0"} {"signature": "def eval_one_max(traj):", "body": "fitness = sum(traj.individual)return (fitness,)", "docstring": "The fitness function", "id": "f4270:m0"} {"signature": "def video(request, video_id):", "body": "api = Api()api.authenticate()availability = api.check_upload_status(video_id)if availability is not True:video = Video.objects.filter(video_id=video_id).get()state = availability[\"\"]if state == \"\" or state == \"\":return render_to_response(\"\",{\"\": video, \"\": video_id, \"\":_(\"\"), \"\": availability},context_instance=RequestContext(request))else:return render_to_response(\"\",{\"\": video, \"\": video_id,\"\": _(\"\"), \"\": availability},context_instance=RequestContext(request))video_params = _video_params(request, video_id)return render_to_response(\"\",video_params,context_instance=RequestContext(request))", "docstring": "Displays a video in an embed player", "id": "f4273:m2"} {"signature": "def get_absolute_url(self):", "body": "return self.swf_url", "docstring": "Returns the swf url", "id": "f4276:c0:m1"} {"signature": "def _access_control(self, access_control, my_media_group=None):", "body": "extension = Noneif access_control is AccessControl.Private:if my_media_group:my_media_group.private = gdata.media.Private()elif access_control is AccessControl.Unlisted:from gdata.media import YOUTUBE_NAMESPACEfrom atom import ExtensionElementkwargs = {\"\": YOUTUBE_NAMESPACE,\"\": {'': '', '': ''},}extension = ([ExtensionElement('', **kwargs)])return extension", "docstring": 
"Prepares the extension element for access control\nExtension element is the optional parameter for the YouTubeVideoEntry\nWe use extension element to modify access control settings\n\nReturns:\n tuple of extension elements", "id": "f4278:c3:m1"} {"signature": "def fetch_video(self, video_id):", "body": "return Api.yt_service.GetYouTubeVideoEntry('' % video_id)", "docstring": "Retrieve a specific video entry and return it\n@see http://gdata-python-client.googlecode.com/hg/pydocs/gdata.youtube.html#YouTubeVideoEntry", "id": "f4278:c3:m2"} {"signature": "def check_upload_status(self, video_id):", "body": "if not self.authenticated:raise ApiError(_(\"\"))entry = self.fetch_video(video_id)upload_status = Api.yt_service.CheckUploadStatus(entry)if upload_status is not None:video_upload_state = upload_status[]detailed_message = upload_status[]return {\"\": video_upload_state, \"\": detailed_message}else:return True", "docstring": "Checks the video upload status\nNewly uploaded videos may be in the processing state\n\nAuthentication is required\n\nReturns:\n True if video is available\n otherwise a dict containes upload_state and detailed message\n i.e. {\"upload_state\": \"processing\", \"detailed_message\": \"\"}", "id": "f4278:c3:m7"} {"signature": "def home(request):", "body": "return render_to_response('', {}, context_instance=RequestContext(request))", "docstring": "home page", "id": "f4279:m0"} {"signature": "def _load_config(path: str) -> dict:", "body": "__, ext = os.path.splitext(path)if ext in ['', '']:import ruamel.yamlloader = ruamel.yaml.safe_loadelse:loader = json.loadwith open(path) as f:config = loader(f)return config", "docstring": "Given a file path, parse it based on its extension (YAML or JSON)\nand return the values as a Python dictionary. 
JSON is the default if an\nextension can't be determined.", "id": "f4295:m0"} {"signature": "def __init__(self,file_env_var: str = None,default_files: List[str] = None,load: bool = False):", "body": "self.file_env_var = file_env_varself.config_file = Noneself.default_files = default_files or []if load:self.load()", "docstring": ":param file_env_var: the name of an environment variable which can be\n used for the name of the configuration file to\n load\n:param default_files: if no file is given, try to load a configuration\n from these files in order\n:param load: load config file on instantiation [default: False].\n\nA docstring defined on the class should be a plain-text description\nused as a header when generating a configuration file.", "id": "f4295:c0:m0"} {"signature": "def wait(self):", "body": "with self.cvar:self.count.value += self.cvar.notify_all()while self.count.value < self.n_procs:self.cvar.wait()", "docstring": "Wait until all processes have reached the barrier.", "id": "f4299:c1:m1"} {"signature": "def reset(self):", "body": "with self.cvar:self.count.value = ", "docstring": "Re-set the barrier so that it can be used again.", "id": "f4299:c1:m2"} {"signature": "def get_queue(self, path, n_procs=, read_ahead=None, cyclic=False, block_size=None, ordered=False):", "body": "example = self.__get_batch(path, block_size)block_size = example.shape[]if read_ahead is None:read_ahead = *n_procs + cbuf = SharedCircBuf(read_ahead, example)stop = multiprocessing.Event()barrier = Barrier(n_procs)sync = GuardSynchronizer() if ordered else Noneprocs = []for i in range(n_procs):process = multiprocessing.Process(target=_Streamer__read_process, args=(self, path, block_size, cbuf, stop, barrier, cyclic,i * block_size, n_procs * block_size, sync))process.daemon = Trueprocess.start()procs.append(process)if not cyclic:def monitor():for p in procs:p.join()cbuf.close()monitor_thread = threading.Thread(target=monitor)monitor_thread.daemon = Truemonitor_thread.start()return Streamer.Queue(cbuf, stop, block_size)", "docstring": "Get a queue that allows direct access to the internal buffer. If the dataset to be read is chunked, the\nblock_size should be a multiple of the chunk size to maximise performance. In this case it is best to leave it\nto the default. When cyclic=False, and block_size does not divide the dataset evenly, the remainder elements\nwill not be returned by the queue. When cyclic=True, the remainder elements will be part of a block that wraps\naround the end and includes elements from the beginning of the dataset. By default, blocks are returned in the\norder in which they become available. The ordered option will force blocks to be returned in on-disk order.\n\n:param path: The HDF5 path to the dataset that should be read.\n:param n_procs: The number of background processes used to read the dataset in parallel.\n:param read_ahead: The number of blocks to allocate in the internal buffer.\n:param cyclic: True if the queue should wrap at the end of the dataset.\n:param block_size: The size along the outer dimension of the blocks to be read. Defaults to a multiple of\n the chunk size, or to a 128KB sized block if the dataset is not chunked.\n:param ordered: Force the reader to return data in on-disk order. 
May result in performance penalty.\n:return: A queue object that allows access to the internal buffer.", "id": "f4299:c7:m3"} {"signature": "def __get_batch(self, path, length, last=False):", "body": "import tablesh5_file = tables.open_file(self.filename, '')h5_node = h5_file.get_node(path)if len(h5_node) == :raise Exception(\"\")if length is None:chunkshape = h5_node.chunkshapeif chunkshape is None:default_length = ***//h5_node[].nbytes length = min(h5_node.shape[], default_length)else:length = chunkshape[]if last:example = h5_node[length*(len(h5_node)//length):].copy()else:example = h5_node[:length].copy()h5_file.close()return example", "docstring": "Get a block of data from the node at path.\n\n:param path: The path to the node to read from.\n:param length: The length along the outer dimension to read.\n:param last: True if the remainder elements should be read.\n:return: A copy of the requested block of data as a numpy array.", "id": "f4299:c7:m1"} {"signature": "def wait(self):", "body": "self.barrier_A.wait()self.barrier_A, self.barrier_B = self.barrier_B, self.barrier_Aself.barrier_A.reset()", "docstring": "Wait until all processes have reached the barrier.", "id": "f4299:c2:m1"} {"signature": "def error(self, status=None):", "body": "def decorator(callback):self._error_handlers[status] = callbackreturn callbackreturn decorator", "docstring": "Decorator to add a callback that generates error page.\n\n The *status* parameter specifies the HTTP response status code\n for which the decorated callback should be invoked. If the\n *status* argument is not specified, then the decorated callable\n is considered to be a fallback callback.\n\n A fallback callback, when defined, is invoked to generate the\n error page for any HTTP response representing an error when\n there is no error handler defined explicitly for the response\n code of the HTTP response.\n\n Arguments:\n status(int, optional): HTTP response status code.\n\n Returns:\n function: Decorator function to add error handler.", "id": "f4315:c0:m7"} {"signature": "def __init__(self, pattern, callback):", "body": "self._re = []self._wildcards = []for token in WildcardRoute.tokens(pattern):if token and token.startswith('') and token.endswith('>'):w = Wildcard(token)self._wildcards.append(w)self._re.append(w.regex())else:self._re.append(re.escape(token))self._re = re.compile('' + ''.join(self._re) + '')self._callback = callback", "docstring": "Initialize wildcard route.\n\n Arguments:\n pattern (str): Pattern associated with the route.\n callback (callable): Route handler.", "id": "f4315:c2:m0"} {"signature": "def __init__(self, start_response_callable):", "body": "self.start = start_response_callableself.status = self.media_type = ''self.charset = ''self._headers = []self.body = Noneself.state = {}", "docstring": "Initialize the current response object.\n\n Arguments:\n start_response_callable (callable): Callable that starts response.", "id": "f4315:c6:m0"} {"signature": "def get(self, pattern):", "body": "return self.route('', pattern)", "docstring": "Decorator to add route for an HTTP GET request.\n\n Arguments:\n pattern (str): Routing pattern the path must match.\n\n Returns:\n function: Decorator to add route for HTTP GET request.", "id": "f4315:c0:m4"} {"signature": "@staticmethoddef like(pattern):", "body": "return RegexRoute._group_re.search(pattern) is not None", "docstring": "Determine if a pattern looks like a regular expression.\n\n Arguments:\n pattern (str): Any route pattern.\n\n Returns:\n ``True`` if the specified 
pattern looks like a regex,\n ``False`` otherwise.", "id": "f4315:c4:m2"} {"signature": "def _get_error_page_callback(self):", "body": "if self.response.status in self._error_handlers:return self._error_handlers[self.response.status]elif None in self._error_handlers:return self._error_handlers[None]else:self.response.media_type = ''return lambda: self.response.status_line", "docstring": "Return an error page for the current response status.", "id": "f4315:c0:m11"} {"signature": "def __init__(self, pattern, callback):", "body": "self._re = re.compile(pattern)self._callback = callback", "docstring": "Initialize regular expression route.\n\n Arguments:\n pattern (str): Pattern associated with the route.\n callback (callable): Route handler.", "id": "f4315:c4:m0"} {"signature": "def __init__(self, environ):", "body": "self.environ = environself.method = environ.get('', '')self.path = environ.get('', '')if not self.path:self.path = ''self.query = MultiDict()self.form = MultiDict()self.cookies = MultiDict()if '' in environ:for k, v in urllib.parse.parse_qsl(environ['']):self.query[k] = vif '' in environ:fs = cgi.FieldStorage(fp=environ[''],environ=environ)for k in fs:for v in fs.getlist(k):self.form[k] = vif '' in environ:cookies = http.cookies.SimpleCookie(environ[''])for c in cookies.values():self.cookies[c.key] = c.value", "docstring": "Initialize the current request object.\n\n Arguments:\n environ (dict): Dictionary of environment variables.", "id": "f4315:c5:m0"} {"signature": "def resolve(self, method, path):", "body": "if method in self._literal and path in self._literal[method]:return self._literal[method][path], [], {}else:return self._resolve_non_literal_route(method, path)", "docstring": "Resolve a request to a route handler.\n\n Arguments:\n method (str): HTTP method, e.g. GET, POST, etc. (type: str)\n path (str): Request path\n\n Returns:\n tuple or None: A tuple of three items:\n\n 1. Route handler (callable)\n 2. Positional arguments (list)\n 3. Keyword arguments (dict)\n\n ``None`` if no route matches the request.", "id": "f4315:c1:m3"} {"signature": "def parse(text, encoding='', handler=None, **defaults):", "body": "text = u(text, encoding).strip()metadata = defaults.copy()handler = handler or detect_format(text, handlers)if handler is None:return metadata, texttry:fm, content = handler.split(text)except ValueError:return metadata, textfm = handler.load(fm)if isinstance(fm, dict):metadata.update(fm)return metadata, content.strip()", "docstring": "Parse text with frontmatter, return metadata and content.\nPass in optional metadata defaults as keyword args.\n\nIf frontmatter is not found, returns an empty metadata dictionary\n(or defaults) and original text content.\n\n::\n\n >>> with open('tests/hello-world.markdown') as f:\n ... metadata, content = frontmatter.parse(f.read())\n >>> print(metadata['title'])\n Hello, world!", "id": "f4320:m1"} {"signature": "def load(fd, encoding='', handler=None, **defaults):", "body": "if hasattr(fd, ''):text = fd.read()else:with codecs.open(fd, '', encoding) as f:text = f.read()handler = handler or detect_format(text, handlers)return loads(text, encoding, handler, **defaults)", "docstring": "Load and parse a file-like object or filename, \nreturn a :py:class:`post `.\n\n::\n\n >>> post = frontmatter.load('tests/hello-world.markdown')\n >>> with open('tests/hello-world.markdown') as f:\n ... 
post = frontmatter.load(f)", "id": "f4320:m2"} {"signature": "def __setitem__(self, name, value):", "body": "self.metadata[name] = value", "docstring": "Set a metadata key", "id": "f4320:c0:m3"} {"signature": "def load(self, fm):", "body": "raise NotImplementedError", "docstring": "Parse frontmatter and return a dict", "id": "f4321:c0:m3"} {"signature": "def detect(self, text):", "body": "if self.FM_BOUNDARY.match(text):return Truereturn False", "docstring": "Decide whether this handler can parse the given ``text``,\nand return True or False.\n\nNote that this is *not* called when passing a handler instance to \n:py:func:`frontmatter.load ` or :py:func:`loads `.", "id": "f4321:c0:m1"} {"signature": "def load(self, fm, **kwargs):", "body": "kwargs.setdefault('', SafeLoader)return yaml.load(fm, **kwargs)", "docstring": "Parse YAML front matter. This uses yaml.SafeLoader by default.", "id": "f4321:c1:m0"} {"signature": "def export(self, metadata, **kwargs):", "body": "raise NotImplementedError", "docstring": "Turn metadata back into text", "id": "f4321:c0:m4"} {"signature": "def setup_mock_redmine_server(max_failures=):", "body": "http_requests = []failures = max_failuresissues_body = read_file('', '')issues_next_body = read_file('', '')issues_empty_body = read_file('', '')issue_2_body = read_file('', '')issue_5_body = read_file('', '')issue_9_body = read_file('', '')issue_7311_body = read_file('', '')user_3_body = read_file('', '')user_4_body = read_file('', '')user_24_body = read_file('', '')user_25_body = read_file('', '')def request_callback(method, uri, headers):nonlocal failuresstatus = last_request = httpretty.last_request()params = last_request.querystringif uri.startswith(REDMINE_ISSUES_URL):updated_on = params[''][]offset = params[''][]if (updated_on == '' and offset == ''):body = issues_bodyelif (updated_on == '' and offset == ''):body = issues_next_bodyelif (updated_on == '' and offset == ''):body = issues_next_bodyelif (updated_on == '' and offset == ''):body = issues_next_bodyelse:body = issues_empty_bodyelif uri.startswith(REDMINE_ISSUE_2_URL):body = issue_2_bodyelif uri.startswith(REDMINE_ISSUE_5_URL):body = issue_5_bodyelif uri.startswith(REDMINE_ISSUE_9_URL):body = issue_9_bodyelif uri.startswith(REDMINE_ISSUE_7311_URL):if failures > :status = body = \"\"failures -= else:body = issue_7311_bodyelif uri.startswith(REDMINE_USER_3_URL):body = user_3_bodyelif uri.startswith(REDMINE_USER_4_URL):body = user_4_bodyelif uri.startswith(REDMINE_USER_24_URL):body = user_24_bodyelif uri.startswith(REDMINE_USER_25_URL):body = user_25_bodyelse:raisehttp_requests.append(last_request)return (status, headers, body)for url in REDMINE_URL_LIST:httpretty.register_uri(httpretty.GET,url,responses=[httpretty.Response(body=request_callback)])return http_requests", "docstring": "Setup a mock Redmine HTTP server", "id": "f4328:m2"} {"signature": "@staticmethoddef _build_job_arguments(task):", "body": "job_args = {}job_args[''] = Q_STORAGE_ITEMSjob_args[''] = task.task_idjob_args[''] = task.backendbackend_args = copy.deepcopy(task.backend_args)if '' in backend_args:backend_args[''] = backend_args.pop('')if '' in backend_args:backend_args[''] = backend_args.pop('')job_args[''] = backend_argsjob_args[''] = task.categoryarchiving_cfg = task.archiving_cfgjob_args[''] = archiving_cfg.to_dict() if archiving_cfg else Nonesched_cfg = task.scheduling_cfgjob_args[''] = sched_cfg.max_retries if sched_cfg else MAX_JOB_RETRIESreturn job_args", "docstring": "Build the set of arguments required for running a job", 
"id": "f4335:c2:m6"} {"signature": "def run(self):", "body": "try:self.listen()except Exception as e:logger.critical(\"\", str(e))logger.critical(traceback.format_exc())", "docstring": "Run thread to listen for jobs and reschedule successful ones.", "id": "f4335:c1:m1"} {"signature": "def cancel_task(self, task_id):", "body": "self.registry.remove(task_id)self._scheduler.cancel_job_task(task_id)logger.info(\"\", task_id)", "docstring": "Cancel or 'un-schedule' a task.\n\n :param task_id: identifier of the task to cancel\n\n :raises NotFoundError: raised when the requested task is not\n found in the registry", "id": "f4335:c2:m3"} {"signature": "def listen(self):", "body": "pubsub = self.conn.pubsub()pubsub.subscribe(self.pubsub_channel)logger.debug(\"\", self.pubsub_channel)for msg in pubsub.listen():logger.debug(\"\", str(msg['']))if msg[''] != '':logger.debug(\"\")continuedata = pickle.loads(msg[''])job_id = data['']job = rq.job.Job.fetch(job_id, connection=self.conn)if data[''] == '':logging.debug(\"\", job_id)handler = self.result_handlerelif data[''] == '':logging.debug(\"\", job_id)handler = self.result_handler_errelse:continueif handler:logging.debug(\"\", job_id)handler(job)", "docstring": "Listen for completed jobs and reschedule successful ones.", "id": "f4335:c1:m2"} {"signature": "def initialize_archive_manager(self, archive_path):", "body": "if archive_path == \"\":raise ValueError(\"\")if archive_path:self.archive_manager = perceval.archive.ArchiveManager(archive_path)", "docstring": "Initialize the archive manager.\n\n :param archive_path: path where the archive manager is located", "id": "f4337:c1:m2"} {"signature": "@cherrypy.expose@cherrypy.tools.json_in()@cherrypy.tools.json_out(handler=json_encoder)def remove(self):", "body": "payload = cherrypy.request.jsonlogger.debug(\"\")task_ids = {}for task_data in payload['']:task_id = task_data['']removed = super().remove_task(task_id)task_ids[task_id] = removedresult = {'': task_ids}return result", "docstring": "Remove tasks", "id": "f4338:c0:m4"} {"signature": "@cherrypy.expose@cherrypy.tools.json_in()def add(self):", "body": "payload = cherrypy.request.jsonlogger.debug(\"\")for task_data in payload['']:try:category = task_data['']backend_args = task_data['']archive_args = task_data.get('', None)sched_args = task_data.get('', None)except KeyError as ex:logger.error(\"\")raise exfrom_date = backend_args.get('', None)if from_date:backend_args[''] = str_to_datetime(from_date)super().add_task(task_data[''],task_data[''],category,backend_args,archive_args=archive_args,sched_args=sched_args)logger.debug(\"\")return \"\"", "docstring": "Add tasks", "id": "f4338:c0:m3"} {"signature": "def remove(self, task_id):", "body": "try:self._rwlock.writer_acquire()del self._tasks[task_id]except KeyError:raise NotFoundError(element=str(task_id))finally:self._rwlock.writer_release()logger.debug(\"\", str(task_id))", "docstring": "Remove a task from the registry.\n\n To remove it, pass its identifier with `taks_id` parameter.\n When the identifier is not found, a `NotFoundError` exception\n is raised.\n\n :param task_id: identifier of the task to remove\n\n :raises NotFoundError: raised when the given task identifier\n is not found on the registry", "id": "f4341:c1:m2"} {"signature": "def __parse_schedule_args(self, sched_args):", "body": "if not sched_args:return Nonereturn SchedulingTaskConfig.from_dict(sched_args)", "docstring": "Parse the schedule arguments of a task", "id": "f4342:c0:m7"} {"signature": "def writer_acquire(self):", "body": 
"self._order_mutex.acquire()self._access_mutex.acquire()self._order_mutex.release()", "docstring": "Acquire the lock to write", "id": "f4343:c0:m3"} {"signature": "def __init__(self, api_key, endpoint, domain, start_month, end_month,time_granularity=\"\", main_domain_only=False):", "body": "if endpoint not in [\"\", \"\", \"\"]:raise InvalidEndpointException(\"\"\"\")self.endpoint = endpointself.domain = utils.domain_from_url(domain)self.start_month = start_monthself.end_month = end_monthself.time_granularity = time_granularityself.main_domain_only = main_domain_onlysuper(EngagementAPI, self).__init__(api_key)", "docstring": "Parameters\n----------\napi_key: string\n SimilarWeb API key\n\nendpoint: string\n Endpoint to use. Can be: pageviews, visitduration, bouncerate\n\ndomain: string\n Domain to query.\n\nstart_month: string\n Start Month in (M-YYYY) format\n\nend_month: string\n End Month in (M-YYYY) format\n\ntime_granularity: string\n Time granularity of report. Can be: Daily, Weekly, Monthly\n\nmain_domain_only: boolean\n Get metrics on the Main Domain only (i.e. not including subdomains)", "id": "f4346:c3:m0"} {"signature": "def __init__(self, api_key, domain, start_month, end_month,main_domain_only=False, results_page=None):", "body": "self.domain = utils.domain_from_url(domain)self.start_month = start_monthself.end_month = end_monthself.main_domain_only = main_domain_onlyself.results_page = results_pagesuper(ReferralsAPI, self).__init__(api_key)", "docstring": "Parameters\n----------\napi_key: string\n SimilarWeb API key\n\ndomain: string\n Domain to query.\n\nstart_month: string\n Start Month in (M-YYYY) format\n\nend_month: string\n End Month in (M-YYYY) format\n\nmain_domain_only: boolean\n Get metrics on the Main Domain only (i.e. not including subdomains)\n\nresults_page: integer\n Enter for more than 10 results", "id": "f4346:c13:m0"} {"signature": "def __init__(self, api_key, domain, start_month, end_month,time_granularity=\"\", main_domain_only=False):", "body": "self.domain = utils.domain_from_url(domain)self.start_month = start_monthself.end_month = end_monthself.time_granularity = time_granularityself.main_domain_only = main_domain_onlysuper(TrafficAPI, self).__init__(api_key)", "docstring": "Parameters\n----------\napi_key: string\n SimilarWeb API key\n\ndomain: string\n Domain to query.\n\nstart_month: string\n Start Month in (M-YYYY) format\n\nend_month: string\n End Month in (M-YYYY) format\n\ntime_granularity: string\n Time granularity of report. Can be: Daily, Weekly, Monthly\n\nmain_domain_only: boolean\n Get metrics on the Main Domain only (i.e. 
not including subdomains)", "id": "f4346:c1:m0"} {"signature": "def __init__(self, api_key, category=None, country=None):", "body": "self.category = categoryself.country = countrysuper(TopSitesAPI, self).__init__(api_key)", "docstring": "Parameters\n----------\napi_key: string\n SimilarWeb API key\n\ncategory: string\n If left blank, `All Categories` will be requested.\n Use `http://api.similarweb.com/v1/TopSites/categories` to get a list of available categories.\n\ncountry: string\n If left blank, `Worldwide` will be requested.\n Use `http://api.similarweb.com/v1/TopSites/countries` to get a list of available categories.", "id": "f4346:c9:m0"} {"signature": "def __init__(self, api_key, endpoint, domain, start_month, end_month,main_domain_only=False, results_page=None):", "body": "if endpoint not in [\"\", \"\", ]:raise InvalidEndpointException(\"\"\"\")self.domain = utils.domain_from_url(domain)self.endpoint = endpointself.start_month = start_monthself.end_month = end_monthself.main_domain_only = main_domain_onlyself.results_page = results_pagesuper(SearchKeywordsAPI, self).__init__(api_key)", "docstring": "Parameters\n----------\napi_key: string\n SimilarWeb API key\n\nendpoint: string\n Endpoint to use. Can be: orgsearch, paidsearch\n\ndomain: string\n Domain to query.\n\nstart_month: string\n Start Month in (M-YYYY) format\n\nend_month: string\n End Month in (M-YYYY) format\n\nmain_domain_only: boolean\n Get metrics on the Main Domain only (i.e. not including subdomains)\n\nresults_page: integer\n Enter for more than 10 results", "id": "f4346:c11:m0"} {"signature": "def __init__(self, api_key, domain):", "body": "self.domain = utils.domain_from_url(domain)super(DestinationsAPI, self).__init__(api_key)", "docstring": "Parameters\n----------\napi_key: string\n SimilarWeb API key\n\ndomain: string\n Domain to query.", "id": "f4346:c12:m0"} {"signature": "def __init__(self, api_key, domain):", "body": "self.domain = utils.domain_from_url(domain)super(AlsoVisitedAPI, self).__init__(api_key)", "docstring": "Parameters\n----------\napi_key: string\n SimilarWeb API key\n\ndomain: string\n Domain to query.", "id": "f4346:c5:m0"} {"signature": "def __init__(self, api_key, domain):", "body": "self.domain = utils.domain_from_url(domain)super(WebsiteTagsAPI, self).__init__(api_key)", "docstring": "Parameters\n----------\napi_key: string\n SimilarWeb API key\n\ndomain: string\n Domain to query.", "id": "f4346:c6:m0"} {"signature": "def domain_from_url(url):", "body": "ext = tldextract.extract(url)if not ext.suffix:raise InvalidURLException()new_url = ext.domain + \"\" + ext.suffixreturn new_url", "docstring": "Get root domain from url.\nWill prune away query strings, url paths, protocol prefix and sub-domains\nExceptions will be raised on invalid urls", "id": "f4348:m0"} {"signature": "@staticmethoddef _parse_yaml(yaml=''):", "body": "return CarddavObject.from_user_input(address_book=mock.Mock(path=''), user_input=yaml,supported_private_objects=[], version='',localize_dates=False)", "docstring": "Parse some yaml string into a CarddavObject\n\n :param yaml: the yaml input string to parse\n :type yaml: str\n :returns: the parsed CarddavObject\n :rtype: CarddavObject", "id": "f4362:c0:m0"} {"signature": "def phone_subcommand(search_terms, vcard_list, parsable):", "body": "all_phone_numbers_list = []matching_phone_number_list = []for vcard in vcard_list:for type, number_list in sorted(vcard.get_phone_numbers().items(),key=lambda k: k[].lower()):for number in sorted(number_list):if 
config.display_by_name() == \"\":name = vcard.get_first_name_last_name()else:name = vcard.get_last_name_first_name()line_formatted = \"\".join([name, type, number])line_parsable = \"\".join([number, name, type])if parsable:phone_number_line = line_parsableelse:phone_number_line = line_formattedif re.search(search_terms,\"\" % (line_formatted, line_parsable),re.IGNORECASE | re.DOTALL):matching_phone_number_list.append(phone_number_line)elif len(re.sub(\"\", \"\", search_terms)) >= :if re.search(re.sub(\"\", \"\", search_terms),re.sub(\"\", \"\", number), re.IGNORECASE):matching_phone_number_list.append(phone_number_line)all_phone_numbers_list.append(phone_number_line)if matching_phone_number_list:if parsable:print(''.join(matching_phone_number_list))else:list_phone_numbers(matching_phone_number_list)elif all_phone_numbers_list:if parsable:print(''.join(all_phone_numbers_list))else:list_phone_numbers(all_phone_numbers_list)else:if not parsable:print(\"\")sys.exit()", "docstring": "Print a phone application friendly contact table.\n\n :param search_terms: used as search term to filter the contacts before\n printing\n :type search_terms: str\n :param vcard_list: the vcards to search for matching entries which should\n be printed\n :type vcard_list: list of carddav_object.CarddavObject\n :param parsable: machine readable output: columns devided by tabulator (\\t)\n :type parsable: bool\n :returns: None\n :rtype: None", "id": "f4369:m22"} {"signature": "def copy_or_move_subcommand(action, vcard_list, target_address_book_list):", "body": "source_vcard = choose_vcard_from_list(\"\" % action.title(), vcard_list)if source_vcard is None:print(\"\")sys.exit()else:print(\"\"% (action.title(), source_vcard, source_vcard.address_book))if len(target_address_book_list) == and target_address_book_list[] == source_vcard.address_book:print(\"\"% (target_address_book_list[], source_vcard))sys.exit()else:available_address_books = [abook for abook in target_address_book_listif abook != source_vcard.address_book]selected_target_address_book = choose_address_book_from_list(\"\", available_address_books)if selected_target_address_book is None:print(\"\")sys.exit()target_vcard = choose_vcard_from_list(\"\",get_contact_list_by_user_selection([selected_target_address_book],source_vcard.get_full_name(), True))if target_vcard is None:copy_contact(source_vcard, selected_target_address_book,action == \"\")else:if source_vcard == target_vcard:print(\"\" % target_vcard)if action == \"\":copy_contact(source_vcard, selected_target_address_book, True)else:print(\"\")else:print(\"\"\"\"\"\"\"\"\"\"\"\"\"\" % (target_vcard.address_book, source_vcard,source_vcard.print_vcard(), target_vcard.print_vcard(),\"\" if action == \"\" else \"\"))while True:input_string = input(\"\")if input_string.lower() == \"\":copy_contact(source_vcard, selected_target_address_book,action == \"\")breakif input_string.lower() == \"\":copy_contact(source_vcard, selected_target_address_book,action == \"\")target_vcard.delete_vcard_file()breakif input_string.lower() == \"\":merge_existing_contacts(source_vcard, target_vcard,action == \"\")breakif input_string.lower() in [\"\", \"\"]:print(\"\")break", "docstring": "Copy or move a contact to a different address book.\n\n :action: the string \"copy\" or \"move\" to indicate what to do\n :type action: str\n :param vcard_list: the contact list from which to select one for the action\n :type vcard_list: list of carddav_object.CarddavObject\n :param target_address_book_list: the list of target address books\n 
:type target_address_book_list: list(addressbook.AddressBook)\n :returns: None\n :rtype: None", "id": "f4369:m30"} {"signature": "def get_last_name_first_name(self):", "body": "last_names = []if self._get_last_names():last_names += self._get_last_names()first_and_additional_names = []if self._get_first_names():first_and_additional_names += self._get_first_names()if self._get_additional_names():first_and_additional_names += self._get_additional_names()if last_names and first_and_additional_names:return \"\".format(helpers.list_to_string(last_names, \"\"),helpers.list_to_string(first_and_additional_names, \"\"))elif last_names:return helpers.list_to_string(last_names, \"\")elif first_and_additional_names:return helpers.list_to_string(first_and_additional_names, \"\")else:return self.get_full_name()", "docstring": ":rtype: str", "id": "f4370:c0:m23"} {"signature": "def get_anniversary(self):", "body": "try:if self.vcard.anniversary.params.get(\"\")[] == \"\":return self.vcard.anniversary.valueexcept (AttributeError, IndexError, TypeError):passtry:return helpers.string_to_date(self.vcard.anniversary.value)except (AttributeError, ValueError):try:return helpers.string_to_date(self.vcard.x_anniversary.value)except (AttributeError, ValueError):passreturn None", "docstring": ":returns: contacts anniversary or None if not available\n :rtype: datetime.datetime or str", "id": "f4370:c0:m48"} {"signature": "@classmethoddef from_file(cls, address_book, filename, supported_private_objects,localize_dates):", "body": "return cls(address_book, filename, supported_private_objects, None,localize_dates)", "docstring": "Use this if you want to create a new contact from an existing .vcf\nfile.", "id": "f4370:c0:m2"} {"signature": "@classmethoddef new_contact(cls, address_book, supported_private_objects, version,localize_dates):", "body": "return cls(address_book, None, supported_private_objects, version,localize_dates)", "docstring": "Use this to create a new and empty contact.", "id": "f4370:c0:m1"} {"signature": "def get_nicknames(self):", "body": "nicknames = []for child in self.vcard.getChildren():if child.name == \"\":nicknames.append(child.value)return sorted(nicknames)", "docstring": ":rtype: list(list(str))", "id": "f4370:c0:m40"} {"signature": "def get_post_addresses(self):", "body": "post_adr_dict = {}for child in self.vcard.getChildren():if child.name == \"\":type = helpers.list_to_string(self._get_types_for_vcard_object(child, \"\"), \"\")if type not in post_adr_dict:post_adr_dict[type] = []post_adr_dict[type].append({\"\": child.value.box,\"\": child.value.extended,\"\": child.value.street,\"\": child.value.code,\"\": child.value.city,\"\": child.value.region,\"\": child.value.country})for post_adr_list in post_adr_dict.values():post_adr_list.sort(key=lambda x: (helpers.list_to_string(x[''], \"\").lower(),helpers.list_to_string(x[''], \"\").lower()))return post_adr_dict", "docstring": ": returns: dict of type and post address list\n:rtype: dict(str, list(dict(str,list|str)))", "id": "f4370:c0:m35"} {"signature": "def get_phone_numbers(self):", "body": "phone_dict = {}for child in self.vcard.getChildren():if child.name == \"\":type = helpers.list_to_string(self._get_types_for_vcard_object(child, \"\"), \"\")if type not in phone_dict:phone_dict[type] = []if child.value.lower().startswith(\"\"):phone_dict[type].append(child.value[:])else:phone_dict[type].append(child.value)for number_list in phone_dict.values():number_list.sort()return phone_dict", "docstring": ": returns: dict of type and phone number 
list\n:rtype: dict(str, list(str))", "id": "f4370:c0:m31"} {"signature": "def _get_webpages(self):", "body": "urls = []for child in self.vcard.getChildren():if child.name == \"\":urls.append(child.value)return sorted(urls)", "docstring": ":rtype: list(list(str))", "id": "f4370:c0:m46"} {"signature": "@classmethoddef from_user_input(cls, address_book, user_input,supported_private_objects, version, localize_dates):", "body": "contact = cls(address_book, None, supported_private_objects, version,localize_dates)contact._process_user_input(user_input)return contact", "docstring": "Use this if you want to create a new contact from user input.", "id": "f4370:c0:m3"} {"signature": "@staticmethoddef _compare_uids(uid1, uid2):", "body": "sum = for char1, char2 in zip(uid1, uid2):if char1 == char2:sum += else:breakreturn sum", "docstring": "Calculate the minimum length of initial substrings of uid1 and uid2\n for them to be different.\n\n :param uid1: first uid to compare\n :type uid1: str\n :param uid2: second uid to compare\n :type uid2: str\n :returns: the length of the shortes unequal initial substrings\n :rtype: int", "id": "f4373:c1:m4"} {"signature": "def __init__(self, name, private_objects=tuple(), localize_dates=True,skip=False):", "body": "self._loaded = Falseself.contacts = {}self._short_uids = Noneself.name = nameself._private_objects = private_objectsself._localize_dates = localize_datesself._skip = skip", "docstring": ":param name: the name to identify the address book\n:type name: str\n:param private_objects: the names of private vCard extension fields to\n load\n:type private_objects: iterable(str)\n:param localize_dates: wheater to display dates in the local format\n:type localize_dates: bool\n:param skip: skip unparsable vCard files\n:type skip: bool", "id": "f4373:c1:m0"} {"signature": "def __init__(self, name, abooks, **kwargs):", "body": "super().__init__(name, **kwargs)self._abooks = abooks", "docstring": ":param name: the name to identify the address book\n:type name: str\n:param abooks: a list of address books to combine in this collection\n:type abooks: list(AddressBook)\n:param **kwargs: further arguments for the parent constructor", "id": "f4373:c3:m0"} {"signature": "def _search_names(self, query):", "body": "regexp = re.compile(query, re.IGNORECASE | re.DOTALL)for contact in self.contacts.values():if regexp.search(contact.get_full_name()) is not None:yield contact", "docstring": "Search in the name filed for contacts matching query.\n\n :param query: the query to search for\n :type query: str\n :yields: all found contacts\n :rtype: generator(carddav_object.CarddavObject)", "id": "f4373:c1:m6"} {"signature": "def search(self, query, method=\"\"):", "body": "logging.debug('', self.name, query)if not self._loaded:self.load(query)if method == \"\":search_function = self._search_allelif method == \"\":search_function = self._search_nameselif method == \"\":search_function = self._search_uidelse:raise ValueError('''')return list(search_function(query))", "docstring": "Search this address book for contacts matching the query.\n\n The method can be one of \"all\", \"name\" and \"uid\". 
The backend for this\n address book migth be load()ed if needed.\n\n :param query: the query to search for\n :type query: str\n :param method: the type of fileds to use when seaching\n :type method: str\n :returns: all found contacts\n :rtype: list(carddav_object.CarddavObject)", "id": "f4373:c1:m8"} {"signature": "def _search_uid(self, query):", "body": "try:yield self.contacts[query]except KeyError:for uid in self.contacts:if uid.startswith(query):yield self.contacts[uid]", "docstring": "Search for contacts with a matching uid.\n\n :param query: the query to search for\n :type query: str\n :yields: all found contacts\n :rtype: generator(carddav_object.CarddavObject)", "id": "f4373:c1:m7"} {"signature": "def convert_to_vcard(name, value, allowed_object_type):", "body": "if isinstance(value, str):if allowed_object_type == ObjectType.list_with_strings:raise ValueError(\"\" + name + \"\")else:return value.strip()elif isinstance(value, list):if allowed_object_type == ObjectType.string:raise ValueError(\"\" + name + \"\")else:for entry in value:if not isinstance(entry, str):raise ValueError(\"\" + name + \"\")return [x.strip() for x in value if x]else:if allowed_object_type == ObjectType.string:raise ValueError(\"\" + name + \"\")elif allowed_object_type == ObjectType.list_with_strings:raise ValueError(\"\" + name + \"\")else:raise ValueError(\"\" + name + \"\")", "docstring": "converts user input into vcard compatible data structures\n :param name: object name, only required for error messages\n :type name: str\n :param value: user input\n :type value: str or list(str)\n :param allowed_object_type: set the accepted return type for vcard\n attribute\n :type allowed_object_type: enum of type ObjectType\n :returns: cleaned user input, ready for vcard or a ValueError\n :rtype: str or list(str)", "id": "f4374:m7"} {"signature": "def string_to_date(input):", "body": "for format_string in (\"\", \"\", \"\", \"\",\"\", \"\",\"\", \"\"):try:return datetime.strptime(input, format_string)except ValueError:passfor format_string in (\"\", \"\"):try:return datetime.strptime(''.join(input.rsplit(\"\", )),format_string)except ValueError:passraise ValueError", "docstring": "Convert string to date object.\n\n :param input: the date string to parse\n :type input: str\n :returns: the parsed datetime object\n :rtype: datetime.datetime", "id": "f4374:m3"} {"signature": "def convert_to_yaml(name, value, indentation, indexOfColon, show_multi_line_character):", "body": "strings = []if isinstance(value, list):if len(value) == and isinstance(value[], str):value = value[]elif len(value) == and isinstance(value[], list)and len(value[]) == and isinstance(value[][], str):value = value[][]if isinstance(value, str):strings.append(\"\" % ('' * indentation, name, '' * (indexOfColon-len(name)),indent_multiline_string(value, indentation+,show_multi_line_character)))elif isinstance(value, list):strings.append(\"\" % ('' * indentation, name, '' * (indexOfColon-len(name))))for outer in value:if isinstance(outer, list)and len(outer) == and isinstance(outer[], str):outer = outer[]if isinstance(outer, str):strings.append(\"\" % ('' * (indentation+), indent_multiline_string(outer, indentation+, show_multi_line_character)))elif isinstance(outer, list):strings.append(\"\" % ('' * (indentation+)))for inner in outer:if isinstance(inner, str):strings.append(\"\" % ('' * (indentation+), indent_multiline_string(inner, indentation+,show_multi_line_character)))return strings", "docstring": "converts a value list into yaml syntax\n :param name: 
name of object (example: phone)\n :type name: str\n :param value: object contents\n :type value: str, list(str), list(list(str))\n :param indentation: indent all by number of spaces\n :type indentation: int\n :param indexOfColon: use to position : at the name string (-1 for no space)\n :type indexOfColon: int\n :param show_multi_line_character: option to hide \"|\"\n :type show_multi_line_character: boolean\n :returns: yaml formatted string array of name, value pair\n :rtype: list(str)", "id": "f4374:m6"} {"signature": "def list_to_string(input, delimiter):", "body": "if isinstance(input, list):return delimiter.join(list_to_string(item, delimiter) for item in input)return input", "docstring": "converts list to string recursively so that nested lists are supported\n\n :param input: a list of strings and lists of strings (and so on recursive)\n :type input: list\n :param delimiter: the deimiter to use when joining the items\n :type delimiter: str\n :returns: the recursively joined list\n :rtype: str", "id": "f4374:m1"} {"signature": "@classmethoddef get_action(cls, alias):", "body": "for action, alias_list in cls.action_map.items():if alias in alias_list:return actionreturn None", "docstring": "Find the name of the action for the supplied alias. If no action is\n asociated with the given alias, None is returned.\n\n :param alias: the alias to look up\n :type alias: str\n :rturns: the name of the corresponding action or None\n :rtype: str or NoneType", "id": "f4375:c0:m0"} {"signature": "@classmethoddef get_actions(cls):", "body": "return cls.action_map.keys()", "docstring": "Find the names of all defined actions.\n\n :returns: all action names\n :rtype: iterable(str)", "id": "f4375:c0:m2"} {"signature": "def invalid_dict_pickle_keys(self, msg_dict):", "body": "no_pickle_keys = list()for key, val in msg_dict.items():try:pickle.dumps(key)pickle.dumps(val)except TypeError:no_pickle_keys.append(key) except pickle.PicklingError:no_pickle_keys.append(key) except pickle.UnpickleableError:no_pickle_keys.append(key) return no_pickle_keys", "docstring": "Return a list of keys that can't be pickled. Return [] if \n there are no pickling problems with the values associated with the\n keys. 
Return the list of keys, if there are problems.", "id": "f4376:c2:m11"} {"signature": "def respawn_dead_workers(self):", "body": "for w_id, p in self.workers.items():if not p.is_alive():if self.log_level >= :self.log.info(\"\".format(w_id))task = self.worker_assignments.get(w_id, {})if self.log_level >= and task != {}:self.log.info(\"\".format(w_id, task))error_suffix = \"\"if task != {}:del self.worker_assignments[w_id]if self.resubmit_on_error or self.hot_loop:self.work_todo.append(task)self.queue_task(task)if self.log_level >= :self.log.info(\"\".format(task))error_suffix = \"\".format(task)if self.log_level >= :self.log.debug(\"\".format(len(self.work_todo)))if self.log_level >= :self.log.info(\"\".format(w_id, error_suffix))self.workers[w_id] = Process(target=Worker,args=(w_id, self.t_q, self.r_q, self.worker_cycle_sleep),)self.workers[w_id].daemon = Trueself.workers[w_id].start()", "docstring": "Respawn workers / tasks upon crash", "id": "f4376:c2:m8"} {"signature": "def message_loop(self, t_q, r_q):", "body": "t_msg = {}while t_msg.get(\"\", \"\") != \"\":try:t_msg = t_q.get(True, self.cycle_sleep) self.task = t_msg.get(\"\", \"\") if self.task != \"\":self.task.task_start = time.time() self.r_q_send({\"\": self.w_id, \"\": self.task, \"\": \"\"})self.cycle_sleep = self.task.worker_loop_delayself.task.result = self.task.run()self.task.task_stop = time.time() self.r_q_send({\"\": self.w_id, \"\": self.task, \"\": \"\"}) self.task = Noneexcept Empty:passexcept Full:time.sleep()except:if self.task is not None:self.task.task_stop = time.time() tb_str = \"\".join(tb.format_exception(*(sys.exc_info())))self.r_q_send({\"\": self.w_id,\"\": self.task,\"\": tb_str,\"\": \"\",})return", "docstring": "Loop through messages and execute tasks", "id": "f4376:c0:m4"} {"signature": "@propertydef log_message(self):", "body": "time_delta = deepcopy(self.time_delta)total_work_time = self.worker_count * time_deltatime_worked = sum(self.exec_times)pct_busy = time_worked / total_work_time * min_task_time = min(self.exec_times)avg_task_time = sum(self.exec_times) / len(self.exec_times)max_task_time = max(self.exec_times)min_queue_time = min(self.queue_times)avg_queue_time = sum(self.queue_times) / len(self.queue_times)max_queue_time = max(self.queue_times)time_delta = self.time_deltatotal_tasks = len(self.exec_times)avg_task_rate = total_tasks / time_deltaself.reset()task_msg = \"\"\"\"\"\".format(total_tasks, round(avg_task_rate, ), self.worker_count, round(pct_busy, ))task_mam = \"\"\"\"\"\".format(round(min_task_time, ), round(avg_task_time, ), round(max_task_time, ))queue_mam = \"\"\"\"\"\".format(round(min_queue_time, ), round(avg_queue_time, ), round(max_queue_time, ))return \"\"\"\"\"\".format(task_msg, task_mam, queue_mam)", "docstring": "Build a log message and reset the stats", "id": "f4376:c1:m5"} {"signature": "@abstractmethoddef run(self):", "body": "pass", "docstring": "Define what should be done", "id": "f4377:c0:m3"} {"signature": "def main():", "body": "args = get_args()ret_code = args.target(args)_logger.debug('', ret_code)sys.exit(ret_code)", "docstring": "Main entry point for the CLI.", "id": "f4382:m0"} {"signature": "def _encode_long(self, val):", "body": "return ''.join([self.alphabet[(val//len(self.alphabet)**i) % len(self.alphabet)]for i in reversed(range(self.chunklen[]))])", "docstring": "encodes an integer of 8*self.chunklen[0] bits using the specified\nalphabet", "id": "f4385:c0:m3"} {"signature": "def _get_chunk(self, data, index):", "body": "return 
data[index*self.chunklen[]:(index+)*self.chunklen[]]", "docstring": "partition the data into chunks and retrieve the chunk at the given index", "id": "f4385:c0:m5"} {"signature": "@_uses_dbdef get_domain(self, domain_name):", "body": "protocol = self.database_uri.split('', )[]if protocol in ('', ''):return self._get_domain_from_rest_api(domain_name)else:domain = self._get_domain_from_db(domain_name)if domain:return domainelse:raise NoSuchDomainException", "docstring": "Get the :class:`Domain ` object from a name.\n\n :param domain_name: The domain name to fetch the object for.\n :returns: The :class:`Domain ` class with this domain_name if found, else\n None.", "id": "f4386:c1:m3"} {"signature": "@_uses_dbdef search(self, query):", "body": "results = self.session.query(Domain).filter(Domain.name.ilike('' % query)).all()return results", "docstring": "Search the database for the given query. Will find partial matches.", "id": "f4386:c1:m2"} {"signature": "def create_domain(self, domain_name, username=None, alphabet=Domain.DEFAULT_ALPHABET,length=Domain.DEFAULT_KEY_LENGTH):", "body": "try:return self._create_domain(domain_name, username, alphabet, length)except Exception as ex:_logger.warn(\"\", ex)raise DuplicateDomainException", "docstring": "Create a new domain entry in the database.\n\n :param username: The username to associate with this domain.\n :param alphabet: A character set restriction to impose on keys generated for this domain.\n :param length: The length of the generated key, in case of restrictions on the site.", "id": "f4386:c1:m7"} {"signature": "@_uses_dbdef modify_domain(self, domain_name, new_salt=False, username=None):", "body": "domain = self._get_domain_from_db(domain_name)if domain is None:raise NoSuchDomainExceptionif new_salt:_logger.info(\"\")domain.new_salt()if username is not None:domain.username = usernamereturn domain", "docstring": "Modify an existing domain.\n\n :param domain_name: The name of the domain to modify.\n :param new_salt: Whether to generate a new salt for the domain.\n :param username: If given, change domain username to this value.\n :returns: The modified :class:`Domain ` object.", "id": "f4386:c1:m6"} {"signature": "def bootstrap(self, path_or_uri):", "body": "_logger.debug(\"\", path_or_uri)self.database_uri = _urify_db(path_or_uri)db = sa.create_engine(self.database_uri)Base.metadata.create_all(db)", "docstring": "Initialize a database.\n\n :param database_path: The absolute path to the database to initialize.", "id": "f4386:c1:m1"} {"signature": "def get_key(self):", "body": "master_password = getpass.getpass('')return self.derive_key(master_password)", "docstring": "Fetches the key for the domain. 
Prompts the user for password.\n\n Thin wrapper around :func:`Domain.derive_key `.", "id": "f4386:c0:m4"} {"signature": "def create_dsmr_protocol(dsmr_version, telegram_callback, loop=None):", "body": "if dsmr_version == '':specification = telegram_specifications.V2_2serial_settings = SERIAL_SETTINGS_V2_2elif dsmr_version == '':specification = telegram_specifications.V4serial_settings = SERIAL_SETTINGS_V4elif dsmr_version == '':specification = telegram_specifications.V5serial_settings = SERIAL_SETTINGS_V5else:raise NotImplementedError(\"\",dsmr_version)protocol = partial(DSMRProtocol, loop, TelegramParser(specification),telegram_callback=telegram_callback)return protocol, serial_settings", "docstring": "Creates a DSMR asyncio protocol.", "id": "f4402:m0"} {"signature": "def create_tcp_dsmr_reader(host, port, dsmr_version,telegram_callback, loop=None):", "body": "protocol, _ = create_dsmr_protocol(dsmr_version, telegram_callback, loop=None)conn = loop.create_connection(protocol, host, port)return conn", "docstring": "Creates a DSMR asyncio protocol coroutine using TCP connection.", "id": "f4402:m2"} {"signature": "def connection_lost(self, exc):", "body": "if exc:self.log.exception('')else:self.log.info('')self._closed.set()", "docstring": "Stop when connection is lost.", "id": "f4402:c0:m3"} {"signature": "def _find_telegrams(self):", "body": "return re.findall(r'',self._buffer,re.DOTALL)", "docstring": "Find complete telegrams in buffer from start ('/') till ending\nchecksum ('!AB12\\r\\n').\n:rtype: list", "id": "f4403:c0:m4"} {"signature": "def get_all(self):", "body": "for telegram in self._find_telegrams():self._remove(telegram)yield telegram", "docstring": "Remove complete telegrams from buffer and yield them.\n:rtype generator:", "id": "f4403:c0:m1"} {"signature": "def __init__(self, telegram_specification, apply_checksum_validation=True):", "body": "self.telegram_specification = telegram_specificationself.apply_checksum_validation = apply_checksum_validation", "docstring": ":param telegram_specification: determines how the telegram is parsed\n:param apply_checksum_validation: validate checksum if applicable for\n telegram DSMR version (v4 and up).\n:type telegram_specification: dict", "id": "f4404:c0:m0"} {"signature": "def mutex(func):", "body": "def wrapper(*args, **kwargs):\"\"\"\"\"\"lock = args[].locklock.acquire(True)try:return func(*args, **kwargs)except:raisefinally:lock.release()return wrapper", "docstring": "use a thread lock on current method, if self.lock is defined", "id": "f4414:m2"} {"signature": "def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME,login_url=None):", "body": "actual_decorator = request_passes_test(lambda r: r.session.get(''),login_url=login_url,redirect_field_name=redirect_field_name)if function:return actual_decorator(function)return actual_decorator", "docstring": "Decorator for views that checks that the user is logged in, redirecting\nto the log-in page if necessary.", "id": "f4421:m5"} {"signature": "def get_page_of_iterator(iterator, page_size, page_number):", "body": "try:page_number = validate_page_number(page_number)except (PageNotAnInteger, EmptyPage):page_number = start = (page_number - ) * page_sizeend = (page_number * page_size) + skipped_items = list(islice(iterator, start))items = list(islice(iterator, end))if len(items) == and page_number != :items = skipped_itemspage_number = has_next = len(items) > page_sizeitems = items[:page_size]return NoCountPage(items, page_number, page_size, has_next)", "docstring": "Get 
a page from an interator, handling invalid input from the page number\nby defaulting to the first page.", "id": "f4426:m7"} {"signature": "def get_last_value_from_timeseries(timeseries):", "body": "if not timeseries:return for metric, points in timeseries.items():return next((p[''] for p in reversed(points) if p[''] > ), )", "docstring": "Gets the most recent non-zero value for a .last metric or zero\n for empty data.", "id": "f4426:m1"} {"signature": "def get_timestamp(dt):", "body": "return time.mktime(dt.timetuple()) * ", "docstring": "Returns a Unix timestamp in microseconds for a given datetime.", "id": "f4426:m2"} {"signature": "def new_junction_reparse_buffer(path=None):", "body": "if path is None:substnamebufferchars = else:substnamebufferchars = len(path) + class REPARSE_DATA_BUFFER(ctypes.Structure):_fields_ = [(\"\", ctypes.c_ulong),(\"\", ctypes.c_ushort),(\"\", ctypes.c_ushort),(\"\", ctypes.c_ushort),(\"\", ctypes.c_ushort),(\"\", ctypes.c_ushort),(\"\", ctypes.c_ushort),(\"\", ctypes.c_wchar * substnamebufferchars),(\"\", ctypes.c_wchar * )]numpathbytes = (substnamebufferchars - ) * sizeof(ctypes.c_wchar)buffersize = (numpathbytes + (sizeof(ctypes.c_wchar) * ) + (sizeof(ctypes.c_ushort) * ))if path is None:buffer = REPARSE_DATA_BUFFER()buffer.ReparseTag = IO_REPARSE_TAG_MOUNT_POINTelse:buffer = REPARSE_DATA_BUFFER(IO_REPARSE_TAG_MOUNT_POINT,buffersize,,, numpathbytes,numpathbytes + , ,path,\"\")return (buffer, buffersize + REPARSE_DATA_BUFFER.SubstituteNameOffset.offset)", "docstring": "Given a path, return a pair containing a new REPARSE_DATA_BUFFER and the\nlength of the buffer (not necessarily the same as sizeof due to packing\nissues).\nIf no path is provided, the maximum length is assumed.", "id": "f4442:m0"} {"signature": "def getdirinfo(path):", "body": "return _getfileinfo(path, FILE_FLAG_BACKUP_SEMANTICS)", "docstring": "Return information for the directory at the given path. 
This is going to be a\nstruct of type BY_HANDLE_FILE_INFORMATION.", "id": "f4444:m1"} {"signature": "def read_cnf(path):", "body": "clauses = []for line in open(path):parts = line.split()if not parts or parts[] == '':continueif parts[] == '':assert len(parts) == assert parts[] == ''n_vars, n_clauses = [int(n) for n in parts[:]]continueif parts[] == '':breakassert parts[-] == ''clauses.append([int(lit) for lit in parts[:-]])assert len(clauses) == n_clausesreturn clauses, n_vars", "docstring": "read a DIMACS cnf formatted file from `path`, and return the clauses\nand number of variables", "id": "f4446:m0"} {"signature": "def evaluate(clauses, sol):", "body": "sol_vars = {} for i in sol:sol_vars[abs(i)] = bool(i > )return all(any(sol_vars[abs(i)] ^ bool(i < ) for i in clause)for clause in clauses)", "docstring": "evaluate the clauses with the solution", "id": "f4446:m1"} {"signature": "def assert_frame_equal_noindex(left, right):", "body": "assert_frame_equal(left.reset_index(drop=True), right.reset_index(drop=True))", "docstring": "compare dataframes but ignore index values.\n\n This is useful to compare filtered out dataframes with a manually\n built one.", "id": "f4481:m1"} {"signature": "def default_zip_file(df, df2):", "body": "with io.BytesIO() as memory_file:with zipfile.ZipFile(memory_file, mode='') as zfile:tmp = tempfile.NamedTemporaryFile()with open(tmp.name, mode=''):joblib.dump(df, tmp.name)with open(tmp.name, mode='') as f:content = f.read()zfile.writestr('', content)tmp.close()tmp2 = tempfile.NamedTemporaryFile()with open(tmp2.name, mode=''):joblib.dump(df2, tmp2.name)with open(tmp2.name, mode='') as f:content = f.read()zfile.writestr('', content)tmp2.close()memory_file.seek()return memory_file.getvalue()", "docstring": "Return zip file with two DF saved using joblib.", "id": "f4485:m0"} {"signature": "def get_orig_function(f):", "body": "try:while True:f = f.__wrapped__except AttributeError:return f", "docstring": "Make use of the __wrapped__ attribute to find the original function\n of a decorated function.", "id": "f4489:m0"} {"signature": "def compute_ffill_by_group(df,id_cols: List[str],reference_cols: List[str],value_col: str):", "body": "check_params_columns_duplicate(id_cols + reference_cols + [value_col])df = df.sort_values(by=id_cols + reference_cols)df = df.set_index(id_cols)df[''] = - df[value_col].isnull().astype(int)df[''] = df.groupby(level=list(range(, len(id_cols) - )))[''].cumsum()df[value_col] = df[value_col].ffill()df.loc[df[''] == , value_col] = Nonedel df['']return df.reset_index()", "docstring": "Compute `ffill` with `groupby`\nDedicated method as there is a performance issue with a simple groupby/fillna (2017/07)\nThe method `ffill` propagates last valid value forward to next values.\n\n---\n\n### Parameters\n\n*mandatory :*\n- `id_cols` (*list of str*): names of columns used to create each group.\n- `reference_cols` (*list of str*): names of columns used to sort.\n- `value_col` (*str*): name of the columns to fill.\n\n---\n\n### Example\n\n**Input**\n\nname | rank | value\n:------:|:--------------:|:--------:\nA | 1 | 2\nA | 2 | 5\nA | 3 | null\nB | 1 | null\nB | 2 | 7\n\n```cson\ncompute_ffill_by_group:\n id_cols: ['name']\n reference_cols: ['rank']\n value_col: 'value'\n```\n\n**Ouput**\n\nname | rank | value\n:------:|:--------------:|:--------:\nA | 1 | 2\nA | 2 | 5\nA | 3 | 5\nB | 1 | null\nB | 2 | 7", "id": "f4492:m0"} {"signature": "def compute_evolution_by_criteria(df,id_cols: List[str],value_col: str,compare_to: str,method: str = '',format: str 
= '',offseted_suffix: str = '',evolution_col_name: str = '',raise_duplicate_error: bool = True):", "body": "return __compute_evolution(**locals())", "docstring": "This function answers the question: how has a value changed compare to a specific value ?\n\n---\n\n### Parameters\n\n*mandatory :*\n- `id_cols` (*list*): columns used to create each group\n- `value_col` (*str*): name of the column containing the value to compare\n- `compare_to` (*str*): the query identifying a specific set of values for comparison.\n\n*optional :*\n- `method` (*str*): either `\"abs\"` for absolute values or `\"pct\"` for the evolution in percentage of previous value.\n- `offseted_suffix` (*str*): suffix of the offseted column. By default, `\"_offseted\"`.\n- `evolution_col_name` (*str*): name given to the evolution column. By default, `\"evolution_computed\"`.\n- `raise_duplicate_error` (*boolean*): raise an error when the dataset has duplicated values with the given `id_cols`.\n- `format` (*str*): `'df'` # Do not change it !!!\n\n---\n\n### Example\n\n**Input**\n\n| id_cols | value_col | month|\n|:---------:|:------------:|:-------:|\n| A | 100 | 1|\n| | 250 | 12|\n| B | 300 | 1|\n| | 200 | 12|\n\n```cson\ncompute_evolution_by_criteria:\n id_cols: \"id_cols\"\n value_col: \"value_col\"\n compare_to: \"month==12\"\n```\n\n**Output**\n\n| id_cols | value_col | month|\tvalue_offseted\t| evolution_computed|\n|:---------:|:------------:|:-------:|:----------------:|:-----------------:|\n| A | 100 | 1| 250| -150|\n| | 250 | 12| 250| 0|\n| B | 300 | 1| 200| 100|\n| | 200 | 12| 200| 0|", "id": "f4494:m1"} {"signature": "def compute_cumsum(df,id_cols: List[str],reference_cols: List[str],value_cols: List[str],new_value_cols: List[str] = None,cols_to_keep: List[str] = None):", "body": "if cols_to_keep is None:cols_to_keep = []if new_value_cols is None:new_value_cols = value_colsif len(value_cols) != len(new_value_cols):raise ParamsValueError('''')check_params_columns_duplicate(id_cols + reference_cols + cols_to_keep + value_cols)levels = list(range(, len(id_cols)))df = df.groupby(id_cols + reference_cols + cols_to_keep).sum()df[new_value_cols] = df.groupby(level=levels)[value_cols].cumsum()return df.reset_index()", "docstring": "Compute cumsum for a group of columns.\n\n---\n\n### Parameters\n\n*mandatory :*\n- `id_cols` (*list*): the columns id to create each group\n- `reference_cols` (*list*): the columns to order the cumsum\n- `value_cols` (*list*): the columns to cumsum\n\n*optional :*\n- `new_value_cols` (*list*): the new columns with the result cumsum\n- `cols_to_keep` (*list*): other columns to keep in the dataset.\n This option can be used if there is only one row by group [id_cols + reference_cols]\n\n---\n\n### Example\n\n**Input**\n\nMONTH | DAY | NAME | VALUE | X\n:---:|:---:|:--:|:---:|:---:\n 1 | 1 | A | 1 | lo\n 2 | 1 | A | 1 | lo\n 2 | 15 | A | 1 | la\n 1 | 15 | B | 1 | la\n\n```cson\ncompute_cumsum:\n id_cols: ['NAME']\n reference_cols: ['MONTH', 'DAY']\n cumsum_cols: ['VALUE']\n cols_to_keep: ['X']\n```\n\n**Output**\n\nNAME | MONTH | DAY | X | VALUE\n:---:|:---:|:--:|:---:|:---:\n A | 1 | 1 | lo | 1\n A | 2 | 1 | la | 2\n A | 2 | 15 | lo | 3\n B | 1 | 15 | la | 1", "id": "f4497:m0"} {"signature": "def log(logger=None, start_message='', end_message=''):", "body": "def actual_log(f, real_logger=logger):logger = real_logger or _logger@wraps(f)def timed(*args, **kwargs):logger.info(f'')start = time.time()res = f(*args, **kwargs)end = time.time()logger.info(f'')return resreturn timedif 
callable(logger):return actual_log(logger, real_logger=None)return actual_log", "docstring": "Basic log decorator\nCan be used as :\n- @log (with default logger)\n- @log(mylogger)\n- @log(start_message='Hello !\", logger=mylogger, end_message='Bye !')", "id": "f4500:m9"} {"signature": "def percentage(df,column: str,group_cols: Union[str, List[str]] = None,new_column: str = None):", "body": "new_column = new_column or columnif group_cols is None:df[new_column] = * df[column] / sum(df[column])else:df[new_column] = * df[column] / df.groupby(group_cols)[column].transform(sum)return df", "docstring": "Add a column to the dataframe according to the groupby logic on group_cols\n\n---\n\n### Parameters\n\n*mandatory :*\n- `column` (*str*): name of the desired column you need percentage on\n\n*optional :*\n- `group_cols` (*list*): names of columns for the groupby logic\n- `new_column` (*str*): name of the output column. By default `column` will be overwritten.\n\n---\n\n**Input**\n\n| gender | sport | number |\n|:------:|:----------:|:------:|\n| male | bicycle | 17 |\n| female | basketball | 17 |\n| male | basketball | 3 |\n| female | football | 7 |\n| female | running | 30 |\n| male | running | 20 |\n| male | football | 21 |\n| female | bicycle | 17 |\n\n```cson\npercentage:\n new_column: 'number_percentage'\n column: 'number'\n group_cols: ['sport']\n```\n\n**Output**\n\n| gender | sport | number | number_percentage |\n|:------:|:----------:|:------:|:-----------------:|\n| male | bicycle | 17 | 50.0 |\n| female | basketball | 17 | 85.0 |\n| male | basketball | 3 | 15.0 |\n| female | football | 7 | 25.0 |\n| female | running | 30 | 60.0 |\n| male | running | 20 | 40.0 |\n| male | football | 21 | 75.0 |\n| female | bicycle | 17 | 50.0 |", "id": "f4502:m0"} {"signature": "def add_months(dateobj, nb_months: int):", "body": "nb_years, nb_months = divmod(nb_months, )month = dateobj.month + nb_monthsif month > :nb_years += month -= year = dateobj.year + nb_yearslastday = monthrange(year, month)[]return dateobj.replace(year=year, month=month, day=min(lastday, dateobj.day))", "docstring": "return `dateobj` + `nb_months`\n\n If landing date doesn't exist (e.g. 
february, 30th), return the last\n day of the landing month.\n\n >>> add_months(date(2018, 1, 1), 1)\n datetime.date(2018, 1, 1)\n >>> add_months(date(2018, 1, 1), -1)\n datetime.date(2017, 12, 1)\n >>> add_months(date(2018, 1, 1), 25)\n datetime.date(2020, 2, 1)\n >>> add_months(date(2018, 1, 1), -25)\n datetime.date(2015, 12, 1)\n >>> add_months(date(2018, 1, 31), 1)\n datetime.date(2018, 2, 28)", "id": "f4503:m2"} {"signature": "def subtract(df, new_column, column_1, column_2):", "body": "return _basic_math_operation(df, new_column, column_1, column_2, op='')", "docstring": "DEPRECATED - use `formula` instead", "id": "f4504:m2"} {"signature": "def absolute_values(df, *, column: str, new_column: str = None):", "body": "new_column = new_column or columndf[new_column] = abs(df[column])return df", "docstring": "Get the absolute numeric value of each element of a column\n\n---\n\n### Parameters\n\n*mandatory :*\n- `column` (*str*): name of the column\n\n*optional :*\n- `new_column` (*str*): name of the column containing the result.\n By default, no new column will be created and `column` will be replaced.\n\n---\n\n### Example\n\n**Input**\n\n| ENTITY | VALUE_1 | VALUE_2 |\n|:------:|:-------:|:-------:|\n| A | -1.512 | -1.504 |\n| A | 0.432 | 0.14 |\n\n```cson\nabsolute_values:\n column: 'VALUE_1'\n new_column: 'Pika'\n```\n\n**Output**\n\n| ENTITY | VALUE_1 | VALUE_2 | Pika |\n|:------:|:-------:|:-------:|:-----:|\n| A | -1.512 | -1.504 | 1.512 |\n| A | 0.432 | 0.14 | 0.432 |", "id": "f4504:m9"} {"signature": "def _basic_math_operation(df, new_column, column_1, column_2, op):", "body": "if not isinstance(column_1, (str, int, float)):raise TypeError(f'')if not isinstance(column_2, (str, int, float)):raise TypeError(f'')if isinstance(column_1, str):column_1 = df[column_1]if isinstance(column_2, str):column_2 = df[column_2]operator = getattr(_operator, op)df[new_column] = operator(column_1, column_2)return df", "docstring": "Basic mathematical operation to apply operator on `column_1` and `column_2`\nBoth can be either a number or the name of a column of `df`\nWill create a new column named `new_column`", "id": "f4504:m0"} {"signature": "def multiply(df, new_column, column_1, column_2):", "body": "return _basic_math_operation(df, new_column, column_1, column_2, op='')", "docstring": "DEPRECATED - use `formula` instead", "id": "f4504:m3"} {"signature": "def cumsum(df, new_column: str, column: str, index: list, date_column: str, date_format: str):", "body": "logging.getLogger(__name__).warning(f\"\")date_temp = ''if isinstance(index, str):index = [index]levels = list(range(, len(index)))df[date_temp] = pd.to_datetime(df[date_column], format=date_format)reference_cols = [date_temp, date_column]df = df.groupby(index + reference_cols).sum()df[new_column] = df.groupby(level=levels)[column].cumsum()df.reset_index(inplace=True)del df[date_temp]return df", "docstring": "DEPRECATED - please use `compute_cumsum` instead", "id": "f4507:m0"} {"signature": "def fillna(df, column: str, value=None, column_value=None):", "body": "if column not in df.columns:df[column] = nanif value is not None and column_value is not None:raise ValueError('')if value is not None:df[column] = df[column].fillna(value)if column_value is not None:if column_value not in df.columns:raise ValueError(f'')df[column] = df[column].fillna(df[column_value])return df", "docstring": "Can fill NaN values from a column with a given value or a column\n\n---\n\n### Parameters\n\n- `column` (*str*): name of column you want to fill\n- `value`: 
NaN will be replaced by this value\n- `column_value`: NaN will be replaced by value from this column\n\n*NOTE*: You must set either the 'value' parameter or the 'column_value' parameter\n\n---\n\n### Example\n\n**Input**\n\n| variable | wave | year | my_value |\n|:--------:|:-------:|:--------:|:--------:|\n| toto | wave 1 | 2014 | 300 |\n| toto | wave 1 | 2015 | |\n| toto | wave 1 | 2016 | 450 |\n\n```cson\nfillna:\n column: 'my_value'\n value: 0\n```\n\n**Output**\n\n| variable | wave | year | my_value |\n|:--------:|:-------:|:--------:|:--------:|\n| toto | wave 1 | 2014 | 300 |\n| toto | wave 1 | 2015 | 0 |\n| toto | wave 1 | 2016 | 450 |", "id": "f4508:m0"} {"signature": "def concat(df,*,columns: List[str],new_column: str,sep: str = None):", "body": "if len(columns) < :raise ValueError('')first_col, *other_cols = columnsdf.loc[:, new_column] = df[first_col].astype(str).str.cat(df[other_cols].astype(str), sep=sep)return df", "docstring": "Concatenate `columns` element-wise\nSee [pandas doc](\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.cat.html) for more information\n\n---\n\n### Parameters\n\n*mandatory :*\n- `columns` (*list*): list of columns to concatenate (at least 2 columns)\n- `new_column` (*str*): the destination column\n\n*optional :*\n- `sep` (*str*): the separator", "id": "f4510:m7"} {"signature": "def replace_pattern(df,column: str,*,pat: str,repl: str,new_column: str = None,case: bool = True,regex: bool = True):", "body": "new_column = new_column or columndf.loc[:, new_column] = df[column].str.replace(pat, repl, case=case, regex=regex)return df", "docstring": "Replace occurrences of pattern/regex in `column` with some other string\nSee [pandas doc](\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html) for more information\n\n---\n\n### Parameters\n\n*mandatory :*\n- `column` (*str*): the column\n- `pat` (*str*): character sequence or regular expression\n- `repl` (*str*): replacement string\n\n*optional :*\n- `new_column` (*str*): the destination column (if not set, `column` will be used)\n- `case` (*boolean*): if true, case sensitive.\n- `regex` (*boolean*): default true", "id": "f4510:m10"} {"signature": "def contains(df,column: str,*,pat: str,new_column: str = None,case: bool = True,na: Any = None,regex: bool = True):", "body": "new_column = new_column or columndf.loc[:, new_column] = df[column].str.contains(pat, case=case, na=na, regex=regex)return df", "docstring": "Test if pattern or regex is contained within strings of `column`\nSee [pandas doc](\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.contains.html) for more information\n\n---\n\n### Parameters\n\n*mandatory :*\n- `column` (*str*): the column\n- `pat` (*str*): character sequence or regular expression.\n\n*optional :*\n- `new_column` (*str*): the destination column (if not set, `column` will be used)\n- `case` (*boolean*): if true, case sensitive.\n- `na`: fill value for missing values.\n- `regex` (*boolean*): default true", "id": "f4510:m8"} {"signature": "def repeat(df,column: str,*,times: int,new_column: str = None):", "body": "new_column = new_column or columndf.loc[:, new_column] = df[column].str.repeat(times)return df", "docstring": "Duplicate each string in `column` by indicated number of time\nSee [pandas doc](\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.repeat.html) for more information\n\n---\n\n### Parameters\n\n*mandatory :*\n- `column` (*str*): the column\n- 
`times` (*int*): times to repeat the string\n\n*optional :*\n- `new_column` (*str*): the destination column (if not set, `column` will be used)", "id": "f4510:m9"} {"signature": "def _compute_start_end(df, start, end):", "body": "result = {}time_dict = {'': start, '': end}totals = df.groupby('').agg({'': sum}).reset_index()for time_name, time in time_dict.items():if not totals[totals[''] == time['']].empty:value = totals.loc[totals[''] == time[''], ''].values[]else:value = result[time_name] = pd.DataFrame([{'': value,'': time[''],'': time['']}])return result[''], result['']", "docstring": "Compute two dataframes with the value for start and end\nArgs:\n df (dataframe):\n\nReturns: Dataframe, Dataframe", "id": "f4511:m2"} {"signature": "def _compute_inside_group(df):", "body": "inside_group = df.copy()inside_group[''] = ''inside_group[''] = inside_group[''] / inside_group['']inside_group.drop(['', '', ''],axis=, inplace=True)inside_group.rename(columns={'': ''},inplace=True)return inside_group", "docstring": "Compute inside Group\nArgs:\n df(dataframe):\n\nReturns: Dataframe", "id": "f4511:m4"} {"signature": "def melt(df,id: List[str],value: List[str],dropna=False):", "body": "df = df[(id + value)]df = pd.melt(df, id_vars=id, value_vars=value)if dropna:df = df.dropna(subset=[''])return df", "docstring": "A melt will transform a dataset by creating a column \"variable\" and a column \"value\".\nThis function is useful to transform a dataset into a format where one or more columns\nare identifier variables, while all other columns, considered measured\nvariables (value_vars), are \u201cunpivoted\u201d to the row axis, leaving just two\nnon-identifier columns, `\"variable\"` and `\"value\"`.\n\n---\n\n### Parameters\n\n*mandatory :*\n- `id` (*list of str*): names of the columns that must be kept as columns.\n- `value` (*list of str*): names of the columns that will be transformed in long format (in rows).\n\n*optional :*\n- `dropna` (*boolean*): It allows you to drop missing values.\n\n---\n\n### Example\n\n**Input**\n\n| my_label | my_value | my_column_1 | my_column_2 | info_1 | info_2 | info_3 |\n|:--------:|:--------:|:-----------:|:-----------:|:------:|:------:|:------:|\n| toto | 10 | S45 | Lalaland | 10 | 20 | None |\n\n```cson\nmelt:\n id: ['my_label', 'my_value', 'my_column_1', 'my_column_2']\n value: ['info_1', 'info_2', 'info_3']\n dropna: true\n```\n\n**Output**\n\n| my_label | my_value | my_column_1 | my_column_2 | variable | value |\n|:--------:|:--------:|:-----------:|:-----------:|:--------:|:------:|\n| toto | 10 | S45 | Lalaland | info_1 | 10 |\n| toto | 10 | S45 | Lalaland | info_2 | 20 |", "id": "f4512:m0"} {"signature": "def argmin(df, column: str, groups: Union[str, List[str]] = None):", "body": "if groups is None:df = df[df[column] == df[column].min()].reset_index(drop=True)else:group_min = df.groupby(groups)[column].transform('')df = (df.loc[df[column] == group_min, :].drop_duplicates().reset_index(drop=True))return df", "docstring": "Keep the row of the data corresponding to the minimal value in a column\n\n---\n\n### Parameters\n\n*mandatory :*\n- `column` (str): name of the column containing the value you want to keep the minimum\n\n*optional :*\n- `groups` (*str or list(str)*): name of the column(s) used for 'groupby' logic\n(the function will return the argmin by group)\n---\n\n### Example\n\n**Input**\n\n| variable | wave | year | value |\n|:--------:|:-------:|:--------:|:-----:|\n| toto | wave 1 | 2014 | 300 |\n| toto | wave 1 | 2015 | 250 |\n| toto | wave 1 | 2016 
| 450 |\n\n```cson\nargmin:\n column: 'value'\n```\n\n**Output**\n\n| variable | wave | year | value |\n|:--------:|:-------:|:--------:|:-----:|\n| toto | wave 1 | 2015 | 250 |", "id": "f4515:m1"} {"signature": "def cast(df, column: str, type: str, new_column=None):", "body": "new_column = new_column or columndf[new_column] = df[column].astype(type)return df", "docstring": "Convert a column's type into the given type\n\n---\n\n### Parameters\n\n*mandatory :*\n- `column` (*str*): name of the column to convert\n- `type` (*str*): output type. It can be :\n - `\"int\"` : integer type\n - `\"float\"` : general number type\n - `\"str\"` : text type\n\n*optional :*\n- `new_column` (*str*): name of the output column.\n By default the `column` argument is modified.\n\n---\n\n### Example\n\n**Input**\n\n| Column 1 | Column 2 | Column 3 |\n|:-------:|:--------:|:--------:|\n| 'one' | '2014' | 30.0 |\n| 'two' | 2015.0 | '1' |\n| 3.1 | 2016 | 450 |\n\n```cson\npostprocess: [\n cast:\n column: 'Column 1'\n type: 'str'\n cast:\n column: 'Column 2'\n type: 'int'\n cast:\n column: 'Column 3'\n type: 'float'\n]\n```\n\n**Output**\n\n| Column 1 | Column 2 | Column 3 |\n|:-------:|:------:|:--------:|\n| 'one' | 2014 | 30.0 |\n| 'two' | 2015 | 1.0 |\n| '3.1' | 2016 | 450.0 |", "id": "f4519:m3"} {"signature": "def sort(df, columns: Union[str, List[str]], order: Union[str, List[str]] = ''):", "body": "if isinstance(columns, str):columns = [columns]if isinstance(order, str):assert order in ['', '']orders = [order == ''] * len(columns)else:assert len(order) == len(columns), \"\"\"\"orders = []for ord in order:assert ord in ['', ''], f\"\"\"\"orders.append(ord == '')return df.sort_values(columns, ascending=orders)", "docstring": "Sort the data by the value in specified columns\n\n---\n\n### Parameters\n\n*mandatory :*\n- `columns` (*str* or *list(str)*): list of columns to order\n\n*optional :*\n- `order` (*str* or *list(str)*): the ordering condition ('asc' for\nascending or 'desc' for descending). 
If not specified, 'asc' by default.\nIf a list of columns has been specified for the `columns` parameter,\nthe `order` parameter, if explicitly specified, must be a list of same\nlength as the `columns` list (if a string is specified, it will be\nreplicated in a list of same length of the `columns` list)\n\n---\n\n### Example\n\n**Input**\n\n| variable | value |\n|:--------:|:-----:|\n| A | 220 |\n| B | 200 |\n| C | 300 |\n| D | 100 |\n\n```cson\nsort:\n columns: 'value'\n```\n\n**Output**\n\n| variable | value |\n|:--------:|:-----:|\n| D | 100 |\n| B | 200 |\n| A | 220 |\n| C | 300 |", "id": "f4520:m0"} {"signature": "def read_entry(self, file_name):", "body": "file_path = os.path.join(self.EXTRACTION_CACHE_PATH, file_name)logger.info(f'')return joblib.load(file_path)", "docstring": "Args:\n file_name (str):\n\nReturns:\n pd.DataFrame:", "id": "f4522:c0:m10"} {"signature": "def write(self, dfs):", "body": "if not os.path.exists(self.EXTRACTION_CACHE_PATH):os.makedirs(self.EXTRACTION_CACHE_PATH)for name, df in dfs.items():file_path = os.path.join(self.EXTRACTION_CACHE_PATH, name)joblib.dump(df, filename=file_path)logger.info(f'')", "docstring": "Args:\n data (str | byte):\n\nReturns:\n dict: Dict[str, DataFrame]", "id": "f4522:c0:m11"} {"signature": "def extract(data):", "body": "_, tmp_file_path = tempfile.mkstemp()try:with open(tmp_file_path, '') as tmp_file:tmp_file.write(data)if zipfile.is_zipfile(tmp_file_path):return extract_zip(tmp_file_path)else:raise DataSdkError('')finally:shutil.rmtree(tmp_file_path, ignore_errors=True)", "docstring": "Args:\n data (str | byte):\n\nReturns:\n dict: Dict[str, DataFrame]", "id": "f4522:m1"} {"signature": "def __init__(self, namespace: str):", "body": "assert check_argument_types()super().__init__()self.namespace = namespace", "docstring": "You must specify an entry point namespace.", "id": "f4533:c0:m0"} {"signature": "def __missing__(self, key):", "body": "self[key] = load(key, self.namespace)return self[key]", "docstring": "If not already loaded, attempt to load the reference.", "id": "f4533:c0:m1"} {"signature": "def __getattr__(self, name):", "body": "return self[name]", "docstring": "Proxy attribute access through to the dictionary.", "id": "f4533:c0:m2"} {"signature": "def strongly_connected_components(graph: Graph) -> List:", "body": "assert check_argument_types()result = []stack = []low = {}def visit(node: str):if node in low: returnnum = len(low)low[node] = numstack_pos = len(stack)stack.append(node)for successor in graph[node]:visit(successor)low[node] = min(low[node], low[successor])if num == low[node]:component = tuple(stack[stack_pos:])del stack[stack_pos:]result.append(component)for item in component:low[item] = len(graph)for node in graph:visit(node)return result", "docstring": "Find the strongly connected components in a graph using Tarjan's algorithm.\n\n The `graph` argument should be a dictionary mapping node names to sequences of successor nodes.", "id": "f4535:m0"} {"signature": "def robust_topological_sort(graph: Graph) -> list:", "body": "assert check_argument_types()components = strongly_connected_components(graph)node_component = {}for component in components:for node in component:node_component[node] = componentcomponent_graph = {}for component in components:component_graph[component] = []for node in graph:node_c = node_component[node]for successor in graph[node]:successor_c = node_component[successor]if node_c != successor_c:component_graph[node_c].append(successor_c) return topological_sort(component_graph)", 
"docstring": "Identify strongly connected components then perform a topological sort of those components.", "id": "f4535:m2"} {"signature": "def lazyload(reference: str, *args, **kw):", "body": "assert check_argument_types()def lazily_load_reference(self):ref = referenceif ref.startswith(''):ref = traverse(self, ref[:])return load(ref, *args, **kw)return lazy(lazily_load_reference)", "docstring": "Lazily load and cache an object reference upon dereferencing.\n\n Assign the result of calling this function with either an object reference passed in positionally:\n\n class MyClass:\n debug = lazyload('logging:debug')\n\n Or the attribute path to traverse (using `marrow.package.loader:traverse`) prefixed by a period.\n\n class AnotherClass:\n target = 'logging:info'\n log = lazyload('.target')\n\n Additional arguments are passed to the eventual call to `load()`.", "id": "f4538:m0"} {"signature": "@abstractmethoddef get_code(self):", "body": "raise NotImplementedError", "docstring": "Returns current content of the tree.", "id": "f4557:c1:m3"} {"signature": "@staticmethoddef select_python_parser(parser=None):", "body": "if parser == '' or os.environ.get(''):PythonFile.Class = RedbaronPythonFileelse:PythonFile.Class = ParsoPythonFile", "docstring": "Select default parser for loading and refactoring steps. Passing `redbaron` as argument\nwill select the old paring engine from v0.3.3\n\nReplacing the redbaron parser was necessary to support Python 3 syntax. We have tried our\nbest to make sure there is no user impact on users. However, there may be regressions with\nnew parser backend.\n\nTo revert to the old parser implementation, add `GETGAUGE_USE_0_3_3_PARSER=true` property\nto the `python.properties` file in the `/env/default directory.\n\nThis property along with the redbaron parser will be removed in future releases.", "id": "f4557:c0:m1"} {"signature": "def refactor_step(self, old_text, new_text, move_param_from_idx):", "body": "diffs = []step, func = self._find_step_node(old_text)if step is None:return diffsstep_diff = self._refactor_step_text(step, old_text, new_text)diffs.append(step_diff)params_list_node = func.children[]moved_params = self._move_param_nodes(params_list_node.children, move_param_from_idx)if params_list_node.children is not moved_params:params_span = self._span_from_pos(params_list_node.children[].end_pos,params_list_node.children[-].start_pos)params_list_node.children = moved_paramsparam_code = ''.join(p.get_code() for p in moved_params[:-])diffs.append((params_span, param_code))return diffs", "docstring": "Find the step with old_text and change it to new_text.The step function\nparameters are also changed according to move_param_from_idx.\nEach entry in this list should specify parameter position from old.", "id": "f4563:c0:m10"} {"signature": "@staticmethoddef parse(file_path, content=None):", "body": "try:if content is None:with open(file_path) as f:content = f.read()py_tree = _parser.parse(content, path=file_path, error_recovery=False)return ParsoPythonFile(file_path, py_tree)except parso.parser.ParserSyntaxError as ex:logging.error(\"\", file_path,ex.error_leaf.line, ex.error_leaf.get_code())", "docstring": "Create a PythonFile object with specified file_path and content.\nIf content is None then, it is loaded from the file_path method.\nOtherwise, file_path is only used for reporting errors.", "id": "f4563:c0:m0"} {"signature": "def __init__(self, channel):", "body": "self.GetStepNames = 
channel.unary_unary('',request_serializer=messages__pb2.StepNamesRequest.SerializeToString,response_deserializer=messages__pb2.StepNamesResponse.FromString,)self.CacheFile = channel.unary_unary('',request_serializer=messages__pb2.CacheFileRequest.SerializeToString,response_deserializer=lsp__pb2.Empty.FromString,)self.GetStepPositions = channel.unary_unary('',request_serializer=messages__pb2.StepPositionsRequest.SerializeToString,response_deserializer=messages__pb2.StepPositionsResponse.FromString,)self.GetImplementationFiles = channel.unary_unary('',request_serializer=lsp__pb2.Empty.SerializeToString,response_deserializer=messages__pb2.ImplementationFileListResponse.FromString,)self.ImplementStub = channel.unary_unary('',request_serializer=messages__pb2.StubImplementationCodeRequest.SerializeToString,response_deserializer=messages__pb2.FileDiff.FromString,)self.ValidateStep = channel.unary_unary('',request_serializer=messages__pb2.StepValidateRequest.SerializeToString,response_deserializer=messages__pb2.StepValidateResponse.FromString,)self.Refactor = channel.unary_unary('',request_serializer=messages__pb2.RefactorRequest.SerializeToString,response_deserializer=messages__pb2.RefactorResponse.FromString,)self.GetStepName = channel.unary_unary('',request_serializer=messages__pb2.StepNameRequest.SerializeToString,response_deserializer=messages__pb2.StepNameResponse.FromString,)self.GetGlobPatterns = channel.unary_unary('',request_serializer=lsp__pb2.Empty.SerializeToString,response_deserializer=messages__pb2.ImplementationFileGlobPatternResponse.FromString,)self.KillProcess = channel.unary_unary('',request_serializer=messages__pb2.KillProcessRequest.SerializeToString,response_deserializer=lsp__pb2.Empty.FromString,)", "docstring": "Constructor.\n\n Args:\n channel: A grpc.Channel.", "id": "f4566:c0:m0"} {"signature": "def _iter_step_func_decorators(self):", "body": "for node in self.py_tree.find_all(''):for decorator in node.decorators:if decorator.name.value == '':yield node, decoratorbreak", "docstring": "Find functions with step decorator in parsed file.", "id": "f4573:c0:m3"} {"signature": "@staticmethoddef parse(file_path, content=None):", "body": "try:if content is None:with open(file_path) as f:content = f.read()py_tree = RedBaron(content)return RedbaronPythonFile(file_path, py_tree)except Exception as ex:msg = str(ex)marker = \"\"marker_pos = msg.find(marker)if marker_pos > :msg = msg[:marker_pos + len(marker)]logging.error(\"\".format(file_path, msg))", "docstring": "Create a PythonFile object with specified file_path and content.\nIf content is None then, it is loaded from the file_path method.\nOtherwise, file_path is only used for reporting errors.", "id": "f4573:c0:m0"} {"signature": "def alphanum_vld(value):", "body": "if not re.match(alphanum_pattern, value):raise ValidationError(\"\")", "docstring": "Check if value contains anything but alphanumerical chars\n\n :param value: the value to check\n :type value: str\n :returns: None\n :rtype: None\n :raises: ValidationError", "id": "f4582:m1"} {"signature": "def add_userrnd_shot(project):", "body": "rndseq = project.sequence_set.get(name=RNDSEQ_NAME)users = [u for u in project.users.all()]for user in users:shot, created = Shot.objects.get_or_create(name=user.username,project=project,sequence=rndseq,defaults={'': '' % user.username})for t in shot.tasks.all():t.users.add(user)t.full_clean()t.save()", "docstring": "Add a rnd shot for every user in the project\n\n :param project: the project that needs its rnd shots updated\n :type 
project: :class:`muke.models.Project`\n :returns: None\n :rtype: None\n :raises: None", "id": "f4584:m3"} {"signature": "@propertydef globalseq(self):", "body": "return self.sequence_set.get(name='')", "docstring": "Return the global sequence\n\n :returns: the global sequence of the project\n :rtype: :class:`muke.models.Sequence`\n :raises: None", "id": "f4584:c0:m2"} {"signature": "def add_default_sequences(project):", "body": "seqs = [(GLOBAL_NAME, '' % project.name),(RNDSEQ_NAME, '' % project.name)]for name, desc in seqs:seq, created = Sequence.objects.get_or_create(name=name, project=project, defaults={'': desc})", "docstring": "Add or create the default sequences for the given project\n\n :param project: the project that needs default sequences\n :type project: :class:`muke.models.Project`\n :returns: None\n :rtype: None\n :raises: None", "id": "f4584:m2"} {"signature": "def add_default_deps(project):", "body": "for name, short, order, af in DEFAULT_DEPARTMENTS:dep, created = Department.objects.get_or_create(name=name, short=short, ordervalue=order, assetflag=af)dep.projects.add(project)dep.full_clean()dep.save()", "docstring": "Add or create the default departments for the given project\n\n :param project: the project that needs default departments\n :type project: :class:`muke.models.Project`\n :returns: None\n :rtype: None\n :raises: None", "id": "f4584:m0"} {"signature": "@propertydef short(self):", "body": "return self.department.short", "docstring": "Return department short\n\n :returns: short of the department\n :rtype: str\n :raises: None", "id": "f4584:c4:m1"} {"signature": "def add_default_atypes(project):", "body": "for name, desc in DEFAULT_ASSETTYPES:at, created = Atype.objects.get_or_create(name=name, defaults={'': desc})at.projects.add(project)at.full_clean()at.save()", "docstring": "Add or create the default assettypes for the given project\n\n :param project: the project that needs default assettypes\n :type project: :class:`muke.models.Project`\n :returns: None\n :rtype: None\n :raises: None", "id": "f4584:m1"} {"signature": "def normalize_excludes(rootpath, excludes):", "body": "return [path.normpath(path.abspath(exclude)) for exclude in excludes]", "docstring": "Normalize the excluded directory list.", "id": "f4586:m9"} {"signature": "def write_file(name, text, opts):", "body": "fname = path.join(opts.destdir, '' % (name, opts.suffix))if opts.dryrun:print('' % fname)returnif not opts.force and path.isfile(fname):print('' % fname)else:print('' % fname)f = open(fname, '')try:f.write(text)finally:f.close()", "docstring": "Write the output file for module/package .", "id": "f4586:m1"} {"signature": "def recurse_tree(rootpath, excludes, opts):", "body": "if INITPY in os.listdir(rootpath):root_package = rootpath.split(path.sep)[-]else:root_package = Nonetoplevels = []followlinks = getattr(opts, '', False)includeprivate = getattr(opts, '', False)for root, subs, files in walk(rootpath, followlinks=followlinks):py_files = sorted(f for f in filesif path.splitext(f)[] in PY_SUFFIXES andnot is_excluded(path.join(root, f), excludes))is_pkg = INITPY in py_filesif is_pkg:py_files.remove(INITPY)py_files.insert(, INITPY)elif root != rootpath:del subs[:]continueif includeprivate:exclude_prefixes = ('',)else:exclude_prefixes = ('', '')subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes)and not is_excluded(path.join(root, sub), excludes))if is_pkg:if subs or len(py_files) > or notshall_skip(path.join(root, INITPY), opts):subpackage = 
root[len(rootpath):].lstrip(path.sep).replace(path.sep, '')create_package_file(root, root_package, subpackage,py_files, opts, subs)toplevels.append(makename(root_package, subpackage))else:assert root == rootpath and root_package is Nonefor py_file in py_files:if not shall_skip(path.join(rootpath, py_file), opts):module = path.splitext(py_file)[]create_module_file(root_package, module, opts)toplevels.append(module)return toplevels", "docstring": "Look for every file in the directory tree and create the corresponding\nReST files.", "id": "f4586:m8"} {"signature": "def setup_parser():", "body": "parser = optparse.OptionParser(usage=\"\"\"\"\"\")parser.add_option('', '', action='', dest='',help='', default='')parser.add_option('', '', action='', dest='',help='''', type='', default=)parser.add_option('', '', action='', dest='',help='')parser.add_option('', '', action='',dest='', default=False,help='''')parser.add_option('', '', action='', dest='',help='')parser.add_option('', '', action='',dest='',help='')parser.add_option('', '', action='',dest='',help='')parser.add_option('', '', action='', dest='',help='')parser.add_option('', '', action='',dest='',help='''''')parser.add_option('', '', action='', dest='',help='', default='')parser.add_option('', '', action='', dest='',help='')parser.add_option('', '', action='', dest='',help='')parser.add_option('', '', action='', dest='',type='',help='')parser.add_option('', '', action='', dest='',help='')parser.add_option('', '', action='', dest='',help='''')return parser", "docstring": "Sets up the argument parser and returns it\n\n :returns: the parser\n :rtype: :class:`optparse.OptionParser`\n :raises: None", "id": "f4586:m11"} {"signature": "def shall_skip(module, opts):", "body": "if path.getsize(module) <= :return Truefilename = path.basename(module)if filename != '' and filename.startswith('') andnot opts.includeprivate:return Truereturn False", "docstring": "Check if we want to skip this module.", "id": "f4586:m7"} {"signature": "def format_heading(level, text):", "body": "underlining = ['', '', '', ''][level - ] * len(text)return '' % (text, underlining)", "docstring": "Create a heading of [1, 2 or 3 supported].", "id": "f4586:m2"} {"signature": "def main(argv=sys.argv[:]):", "body": "parser = setup_argparse()args = parser.parse_args(argv)if args.gendochelp:sys.argv[] = ''genparser = gendoc.setup_parser()genparser.print_help()sys.exit()print('')print(''*)for odir in args.output:prepare_dir(odir, not args.nodelete)print('')print(''*)for i, idir in enumerate(args.input):if i >= len(args.output):odir = args.output[-]else:odir = args.output[i]run_gendoc(idir, odir, args.gendocargs)", "docstring": "Parse commandline arguments and run the tool\n\n :param argv: the commandline arguments.\n :type argv: list\n :returns: None\n :rtype: None\n :raises: None", "id": "f4588:m3"} {"signature": "def prepare_dir(directory, delete=True):", "body": "if os.path.exists(directory):if delete:assert directory != thisdir, ''print('' % directory)shutil.rmtree(directory)print('' % directory)os.mkdir(directory)else:print('' % directory)os.mkdir(directory)", "docstring": "Create apidoc dir, delete contents if delete is True.\n\n :param directory: the apidoc directory. you can use relative paths here\n :type directory: str\n :param delete: if True, deletes the contents of apidoc. 
This acts like an override switch.\n :type delete: bool\n :returns: None\n :rtype: None\n :raises: None", "id": "f4588:m1"} {"signature": "def way(self, w):", "body": "if w.id not in self.way_ids:returnway_points = []for n in w.nodes:try:way_points.append(Point(n.location.lon, n.location.lat))except o.InvalidLocationError:logging.debug('', w.id, n.ref)self.ways[w.id] = Way(w.id, way_points)", "docstring": "Process each way.", "id": "f4594:c0:m1"} {"signature": "@propertydef transit_route_types(self):", "body": "return ['', '', '', '', '', '', '']", "docstring": "A list of default OSM route types.\n\n Mainly train and bus types.", "id": "f4595:c0:m1"} {"signature": "def create_route_short_name(relation):", "body": "return relation.tags.get('') or ''", "docstring": "Create a meaningful route short name.", "id": "f4599:m2"} {"signature": "def haversine(lon1, lat1, lon2, lat2):", "body": "lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])dlon = lon2 - lon1dlat = lat2 - lat1a = sin(dlat / )** + cos(lat1) * cos(lat2) * sin(dlon / )**c = * asin(sqrt(a))r = return c * r", "docstring": "Calculate the great circle distance between two\npoints on the earth (specified in decimal degrees)", "id": "f4606:m7"} {"signature": "def is_consistent_type(theType, name, *constructionArgs):", "body": "assert theType.__name__ == nameassert isinstance(theType, type)instance = theType(*constructionArgs)assert type(instance) is theTypereturn True", "docstring": "Perform various assertions about *theType* to ensure that it is a\nwell-defined type. This is useful for extension types, where it's\npretty easy to do something wacky. If something about the type is\nunusual, an exception will be raised.\n\n:param theType: The type object about which to make assertions.\n:param name: A string giving the name of the type.\n:param constructionArgs: Positional arguments to use with\n *theType* to create an instance of it.", "id": "f4610:m0"} {"signature": "def anotherInstance(self):", "body": "raise NotImplementedError()", "docstring": "Return an instance of the class under test. Each call to this method\nmust return a different object. The objects must not be equal to the\nobjects returned by C{anInstance}. They may or may not be equal to\neach other (they will not be compared against each other).", "id": "f4610:c0:m1"} {"signature": "def anInstance(self):", "body": "raise NotImplementedError()", "docstring": "Return an instance of the class under test. Each call to this method\nmust return a different object. All objects returned must be equal to\neach other.", "id": "f4610:c0:m0"} {"signature": "def interact_in_memory(client_conn, server_conn):", "body": "wrote = Truewhile wrote:wrote = Falsefor (read, write) in [(client_conn, server_conn),(server_conn, client_conn)]:try:data = read.recv( ** )except WantReadError:passelse:return (read, data)while True:try:dirty = read.bio_read()except WantReadError:breakelse:wrote = Truewrite.bio_write(dirty)", "docstring": "Try to read application bytes from each of the two `Connection` objects.\nCopy bytes back and forth between their send/receive buffers for as long\nas there is anything to copy. When there is nothing more to copy,\nreturn `None`. 
If one of them actually manages to deliver some application\nbytes, return a two-tuple of the connection from which the bytes were read\nand the bytes themselves.", "id": "f4612:m10"} {"signature": "def _server_connection(self, callback, data):", "body": "ctx = Context(SSLv23_METHOD)ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))ctx.set_ocsp_server_callback(callback, data)server = Connection(ctx)server.set_accept_state()return server", "docstring": "Builds a server connection suitable for using OCSP.\n\n:param callback: The callback to register for OCSP.\n:param data: The opaque data object that will be handed to the\n OCSP callback.", "id": "f4612:c18:m1"} {"signature": "def join_bytes_or_unicode(prefix, suffix):", "body": "if type(prefix) == type(suffix):return join(prefix, suffix)if isinstance(prefix, text_type):return join(prefix, suffix.decode(getfilesystemencoding()))else:return join(prefix, suffix.encode(getfilesystemencoding()))", "docstring": "Join two path components of either ``bytes`` or ``unicode``.\n\nThe return type is the same as the type of ``prefix``.", "id": "f4612:m2"} {"signature": "def loopback(server_factory=None, client_factory=None):", "body": "if server_factory is None:server_factory = loopback_server_factoryif client_factory is None:client_factory = loopback_client_factory(server, client) = socket_pair()server = server_factory(server)client = client_factory(client)handshake(client, server)server.setblocking(True)client.setblocking(True)return server, client", "docstring": "Create a connected socket pair and force two connected SSL sockets\nto talk to each other via memory BIOs.", "id": "f4612:m9"} {"signature": "def check_recovery(self, p12_str, key=None, cert=None, ca=None, passwd=b\"\",extra=()):", "body": "if key:recovered_key = _runopenssl(p12_str, b\"\", b\"\", b\"\", b\"\",b\"\" + passwd, *extra)assert recovered_key[-len(key):] == keyif cert:recovered_cert = _runopenssl(p12_str, b\"\", b\"\", b\"\", b\"\",b\"\" + passwd, b\"\", *extra)assert recovered_cert[-len(cert):] == certif ca:recovered_cert = _runopenssl(p12_str, b\"\", b\"\", b\"\", b\"\",b\"\" + passwd, b\"\", *extra)assert recovered_cert[-len(ca):] == ca", "docstring": "Use openssl program to confirm three components are recoverable from a\nPKCS12 string.", "id": "f4617:c7:m6"} {"signature": "def _setBoundTest(self, which):", "body": "certificate = X509()set = getattr(certificate, '' + which)get = getattr(certificate, '' + which)assert get() is Nonewhen = b\"\"set(when)assert get() == whenwhen = b\"\"set(when)assert get() == whenwhen = b\"\"set(when)assert get() == whenwith pytest.raises(ValueError):set(b\"\")with pytest.raises(TypeError):set()with pytest.raises(TypeError):set(b\"\", b\"\")with pytest.raises(TypeError):get(b\"\")", "docstring": "`X509.set_notBefore` takes a string in the format of an\nASN1 GENERALIZEDTIME and sets the beginning of the certificate's\nvalidity period to it.", "id": "f4617:c5:m6"} {"signature": "def signable(self):", "body": "return X509Req()", "docstring": "Create and return a new `X509Req`.", "id": "f4617:c4:m0"} {"signature": "def signable(self):", "body": "raise NotImplementedError()", "docstring": "Return something with a `set_pubkey`, `set_pubkey`, and `sign` method.", "id": "f4617:c3:m0"} {"signature": "def gen_pkcs12(self, cert_pem=None, key_pem=None, ca_pem=None,friendly_name=None):", "body": "p12 = PKCS12()if cert_pem:ret = p12.set_certificate(load_certificate(FILETYPE_PEM, 
cert_pem))assert ret is Noneif key_pem:ret = p12.set_privatekey(load_privatekey(FILETYPE_PEM, key_pem))assert ret is Noneif ca_pem:ret = p12.set_ca_certificates((load_certificate(FILETYPE_PEM, ca_pem),))assert ret is Noneif friendly_name:ret = p12.set_friendlyname(friendly_name)assert ret is Nonereturn p12", "docstring": "Generate a PKCS12 object with components from PEM. Verify that the set\nfunctions return None.", "id": "f4617:c7:m5"} {"signature": "def read_file(*parts):", "body": "with codecs.open(os.path.join(HERE, *parts), \"\", \"\") as f:return f.read()", "docstring": "Build an absolute path from *parts* and return the contents of the\nresulting file. Assume UTF-8 encoding.", "id": "f4619:m0"} {"signature": "def main():", "body": "port = socket()port.setsockopt(SOL_SOCKET, SO_REUSEADDR, )port.bind(('', ))port.listen()print('', end=\"\")stdout.flush()server, addr = port.accept()print('', addr)server_context = Context(TLSv1_METHOD)server_context.set_tlsext_servername_callback(pick_certificate)server_ssl = Connection(server_context, server)server_ssl.set_accept_state()server_ssl.do_handshake()server.close()", "docstring": "Run an SNI-enabled server which selects between a few certificates in a\nC{dict} based on the handshake request it receives from a client.", "id": "f4622:m1"} {"signature": "def createKeyPair(type, bits):", "body": "pkey = crypto.PKey()pkey.generate_key(type, bits)return pkey", "docstring": "Create a public/private key pair.\n\nArguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA\n bits - Number of bits to use in the key\nReturns: The public/private key pair in a PKey object", "id": "f4624:m0"} {"signature": "def createCertificate(req, issuerCertKey, serial, validityPeriod,digest=\"\"):", "body": "issuerCert, issuerKey = issuerCertKeynotBefore, notAfter = validityPeriodcert = crypto.X509()cert.set_serial_number(serial)cert.gmtime_adj_notBefore(notBefore)cert.gmtime_adj_notAfter(notAfter)cert.set_issuer(issuerCert.get_subject())cert.set_subject(req.get_subject())cert.set_pubkey(req.get_pubkey())cert.sign(issuerKey, digest)return cert", "docstring": "Generate a certificate given a certificate request.\n\nArguments: req - Certificate request to use\n issuerCert - The certificate of the issuer\n issuerKey - The private key of the issuer\n serial - Serial number for the certificate\n notBefore - Timestamp (relative to now) when the certificate\n starts being valid\n notAfter - Timestamp (relative to now) when the certificate\n stops being valid\n digest - Digest method to use for signing, default is sha256\nReturns: The signed certificate in an X509 object", "id": "f4624:m2"} {"signature": "def createCertRequest(pkey, digest=\"\", **name):", "body": "req = crypto.X509Req()subj = req.get_subject()for key, value in name.items():setattr(subj, key, value)req.set_pubkey(pkey)req.sign(pkey, digest)return req", "docstring": "Create a certificate request.\n\nArguments: pkey - The key to associate with the request\n digest - Digestion method to use for signing, default is sha256\n **name - The name of the subject of the request, possible\n arguments are:\n C - Country name\n ST - State or province name\n L - Locality name\n O - Organization name\n OU - Organizational unit name\n CN - Common name\n emailAddress - E-mail address\nReturns: The certificate request in an X509Req object", "id": "f4624:m1"} {"signature": "def accept(self):", "body": "c, a = self.__dict__[\"\"].accept()return (SSLWrapper(c), a)", "docstring": "This is the other part of the shutdown() 
workaround.\nSince servers create new sockets, we have to infect\nthem with our magic. :)", "id": "f4627:c0:m4"} {"signature": "def __init__(self, addr,requestHandler=SecureXMLRPCRequestHandler,logRequests=):", "body": "self.funcs = {}self.logRequests = logRequestsself.instance = NoneSecureTCPServer.__init__(self, addr, requestHandler)", "docstring": "This is the exact same code as SimpleXMLRPCServer.__init__\nexcept it calls SecureTCPServer.__init__ instead of plain\nold TCPServer.__init__", "id": "f4627:c3:m0"} {"signature": "def setup(self):", "body": "self.connection = self.request self.rfile = socket._fileobject(self.request, \"\", self.rbufsize)self.wfile = socket._fileobject(self.request, \"\", self.wbufsize)", "docstring": "We need to use socket._fileobject Because SSL.Connection\ndoesn't have a 'dup'. Not exactly sure WHY this is, but\nthis is backed up by comments in socket.py and SSL/connection.c", "id": "f4627:c2:m0"} {"signature": "def check_success(self):", "body": "small = range()for i in range(self.iterations):key = PKey()key.generate_key(TYPE_DSA, )for i in small:cert = X509()cert.set_pubkey(key)for i in small:cert.get_pubkey()", "docstring": "Call the method repeatedly such that it will return a PKey object.", "id": "f4628:c1:m1"} {"signature": "def check_load_privatekey_callback_wrong_type(self):", "body": "for i in range(self.iterations * ):try:load_privatekey(FILETYPE_PEM, self.ENCRYPTED_PEM,lambda *args: {})except ValueError:pass", "docstring": "Call the function with an encrypted PEM and a passphrase callback which\nreturns a non-string.", "id": "f4628:c2:m2"} {"signature": "def check_load_privatekey_callback(self):", "body": "for i in range(self.iterations * ):load_privatekey(FILETYPE_PEM, self.ENCRYPTED_PEM, lambda *args: \"\")", "docstring": "Call the function with an encrypted PEM and a passphrase callback.", "id": "f4628:c2:m0"} {"signature": "def make_assert(error):", "body": "def openssl_assert(ok):\"\"\"\"\"\"if ok is not True:exception_from_error_queue(error)return openssl_assert", "docstring": "Create an assert function that uses :func:`exception_from_error_queue` to\nraise an exception wrapped by *error*.", "id": "f4635:m2"} {"signature": "def path_string(s):", "body": "if isinstance(s, binary_type):return selif isinstance(s, text_type):return s.encode(sys.getfilesystemencoding())else:raise TypeError(\"\")", "docstring": "Convert a Python string to a :py:class:`bytes` string identifying the same\npath and which can be passed into an OpenSSL API accepting a filename.\n\n:param s: An instance of :py:class:`bytes` or :py:class:`unicode`.\n\n:return: An instance of :py:class:`bytes`.", "id": "f4635:m4"} {"signature": "def set_tlsext_host_name(self, name):", "body": "if not isinstance(name, bytes):raise TypeError(\"\")elif b\"\" in name:raise TypeError(\"\")_lib.SSL_set_tlsext_host_name(self._ssl, name)", "docstring": "Set the value of the servername extension to send in the client hello.\n\n:param name: A byte string giving the name.\n\n.. versionadded:: 0.13", "id": "f4636:c15:m6"} {"signature": "def get_cipher_version(self):", "body": "cipher = _lib.SSL_get_current_cipher(self._ssl)if cipher == _ffi.NULL:return Noneelse:version = _ffi.string(_lib.SSL_CIPHER_get_version(cipher))return version.decode(\"\")", "docstring": "Obtain the protocol version of the currently used cipher.\n\n:returns: The protocol name of the currently used cipher\n or :obj:`None` if no connection has been established.\n:rtype: :class:`unicode` or :class:`NoneType`\n\n.. 
versionadded:: 0.15", "id": "f4636:c15:m51"} {"signature": "def bio_write(self, buf):", "body": "buf = _text_to_bytes_and_warn(\"\", buf)if self._into_ssl is None:raise TypeError(\"\")result = _lib.BIO_write(self._into_ssl, buf, len(buf))if result <= :self._handle_bio_errors(self._into_ssl, result)return result", "docstring": "If the Connection was created with a memory BIO, this method can be\nused to add bytes to the read end of that memory BIO. The Connection\ncan then read the bytes (for example, in response to a call to\n:meth:`recv`).\n\n:param buf: The string to put into the memory BIO.\n:return: The number of bytes written", "id": "f4636:c15:m14"} {"signature": "def set_tlsext_use_srtp(self, profiles):", "body": "if not isinstance(profiles, bytes):raise TypeError(\"\")_openssl_assert(_lib.SSL_CTX_set_tlsext_use_srtp(self._context, profiles) == )", "docstring": "Enable support for negotiating SRTP keying material.\n\n:param bytes profiles: A colon delimited list of protection profile\n names, like ``b'SRTP_AES128_CM_SHA1_80:SRTP_AES128_CM_SHA1_32'``.\n:return: None", "id": "f4636:c14:m37"} {"signature": "def export_keying_material(self, label, olen, context=None):", "body": "outp = _no_zero_allocator(\"\", olen)context_buf = _ffi.NULLcontext_len = use_context = if context is not None:context_buf = contextcontext_len = len(context)use_context = success = _lib.SSL_export_keying_material(self._ssl, outp, olen,label, len(label),context_buf, context_len,use_context)_openssl_assert(success == )return _ffi.buffer(outp, olen)[:]", "docstring": "Obtain keying material for application use.\n\n:param: label - a disambiguating label string as described in RFC 5705\n:param: olen - the length of the exported key material in bytes\n:param: context - a per-association context value\n:return: the exported key material bytes or None", "id": "f4636:c15:m35"} {"signature": "def set_shutdown(self, state):", "body": "if not isinstance(state, integer_types):raise TypeError(\"\")_lib.SSL_set_shutdown(self._ssl, state)", "docstring": "Set the shutdown state of the Connection.\n\n:param state: bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.\n:return: None", "id": "f4636:c15:m30"} {"signature": "def get_cert_store(self):", "body": "store = _lib.SSL_CTX_get_cert_store(self._context)if store == _ffi.NULL:return Nonepystore = X509Store.__new__(X509Store)pystore._store = storereturn pystore", "docstring": "Get the certificate store for the context. 
This can be used to add\n\"trusted\" certificates without using the\n:meth:`load_verify_locations` method.\n\n:return: A X509Store object or None if it does not have one.", "id": "f4636:c14:m33"} {"signature": "def raise_if_problem(self):", "body": "if self._problems:try:_raise_current_error()except Error:passraise self._problems.pop()", "docstring": "Raise an exception from the OpenSSL error queue or that was previously\ncaptured whe running a callback.", "id": "f4636:c6:m1"} {"signature": "def client_random(self):", "body": "session = _lib.SSL_get_session(self._ssl)if session == _ffi.NULL:return Nonelength = _lib.SSL_get_client_random(self._ssl, _ffi.NULL, )assert length > outp = _no_zero_allocator(\"\", length)_lib.SSL_get_client_random(self._ssl, outp, length)return _ffi.buffer(outp, length)[:]", "docstring": "Retrieve the random value used with the client hello message.\n\n:return: A string representing the state", "id": "f4636:c15:m33"} {"signature": "def get_cipher_list(self):", "body": "ciphers = []for i in count():result = _lib.SSL_get_cipher_list(self._ssl, i)if result == _ffi.NULL:breakciphers.append(_native(_ffi.string(result)))return ciphers", "docstring": "Retrieve the list of ciphers used by the Connection object.\n\n:return: A list of native cipher strings.", "id": "f4636:c15:m24"} {"signature": "def total_renegotiations(self):", "body": "return _lib.SSL_total_renegotiations(self._ssl)", "docstring": "Find out the total number of renegotiations.\n\n:return: The number of renegotiations.\n:rtype: int", "id": "f4636:c15:m18"} {"signature": "def makefile(self, *args, **kwargs):", "body": "raise NotImplementedError(\"\")", "docstring": "The makefile() method is not implemented, since there is no dup\nsemantics for SSL connections\n\n:raise: NotImplementedError", "id": "f4636:c15:m26"} {"signature": "def bio_read(self, bufsiz):", "body": "if self._from_ssl is None:raise TypeError(\"\")if not isinstance(bufsiz, integer_types):raise TypeError(\"\")buf = _no_zero_allocator(\"\", bufsiz)result = _lib.BIO_read(self._from_ssl, buf, bufsiz)if result <= :self._handle_bio_errors(self._from_ssl, result)return _ffi.buffer(buf, result)[:]", "docstring": "If the Connection was created with a memory BIO, this method can be\nused to read bytes from the write end of that memory BIO. 
Many\nConnection methods will add bytes which must be read in this manner or\nthe buffer will eventually fill up and the Connection will be able to\ntake no further actions.\n\n:param bufsiz: The maximum number of bytes to read\n:return: The string read.", "id": "f4636:c15:m13"} {"signature": "def __getattr__(self, name):", "body": "if self._socket is None:raise AttributeError(\"\" % (self.__class__.__name__, name))else:return getattr(self._socket, name)", "docstring": "Look up attributes on the wrapped socket object if they are not found\non the Connection object.", "id": "f4636:c15:m1"} {"signature": "def get_context(self):", "body": "return self._context", "docstring": "Retrieve the :class:`Context` object associated with this\n:class:`Connection`.", "id": "f4636:c15:m3"} {"signature": "def renegotiate(self):", "body": "if not self.renegotiate_pending():_openssl_assert(_lib.SSL_renegotiate(self._ssl) == )return Truereturn False", "docstring": "Renegotiate the session.\n\n:return: True if the renegotiation can be started, False otherwise\n:rtype: bool", "id": "f4636:c15:m15"} {"signature": "def request_ocsp(self):", "body": "rc = _lib.SSL_set_tlsext_status_type(self._ssl, _lib.TLSEXT_STATUSTYPE_ocsp)_openssl_assert(rc == )", "docstring": "Called to request that the server sends stapled OCSP data, if\navailable. If this is not called on the client side then the server\nwill not send OCSP data. Should be used in conjunction with\n:meth:`Context.set_ocsp_client_callback`.", "id": "f4636:c15:m57"} {"signature": "def get_verify_depth(self):", "body": "return _lib.SSL_CTX_get_verify_depth(self._context)", "docstring": "Retrieve the Context object's verify depth, as set by\n:meth:`set_verify_depth`.\n\n:return: The verify depth", "id": "f4636:c14:m22"} {"signature": "def _make_requires(flag, error):", "body": "def _requires_decorator(func):if not flag:@wraps(func)def explode(*args, **kwargs):raise NotImplementedError(error)return explodeelse:return funcreturn _requires_decorator", "docstring": "Builds a decorator that ensures that functions that rely on OpenSSL\nfunctions that are not present in this build raise NotImplementedError,\nrather than AttributeError coming out of cryptography.\n\n:param flag: A cryptography flag that guards the functions, e.g.\n ``Cryptography_HAS_NEXTPROTONEG``.\n:param error: The string to be used in the exception if the flag is false.", "id": "f4636:m3"} {"signature": "def get_app_data(self):", "body": "return self._app_data", "docstring": "Get the application data (supplied via :meth:`set_app_data()`)\n\n:return: The application data", "id": "f4636:c14:m31"} {"signature": "def b64_encode(self):", "body": "encoded = _lib.NETSCAPE_SPKI_b64_encode(self._spki)result = _ffi.string(encoded)_lib.OPENSSL_free(encoded)return result", "docstring": "Generate a base64 encoded representation of this SPKI object.\n\n:return: The base64 encoded string.\n:rtype: :py:class:`bytes`", "id": "f4638:c16:m3"} {"signature": "def load_certificate(type, buffer):", "body": "if isinstance(buffer, _text_type):buffer = buffer.encode(\"\")bio = _new_mem_buf(buffer)if type == FILETYPE_PEM:x509 = _lib.PEM_read_bio_X509(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)elif type == FILETYPE_ASN1:x509 = _lib.d2i_X509_bio(bio, _ffi.NULL)else:raise ValueError(\"\")if x509 == _ffi.NULL:_raise_current_error()return X509._from_raw_x509_ptr(x509)", "docstring": "Load a certificate (X509) from the string *buffer* encoded with the\ntype *type*.\n\n:param type: The file type (one of FILETYPE_PEM, 
FILETYPE_ASN1)\n\n:param bytes buffer: The buffer the certificate is stored in\n\n:return: The X509 object", "id": "f4638:m8"} {"signature": "def get_notBefore(self):", "body": "return self._get_boundary_time(_lib.X509_get_notBefore)", "docstring": "Get the timestamp at which the certificate starts being valid.\n\nThe timestamp is formatted as an ASN.1 TIME::\n\n YYYYMMDDhhmmssZ\n\n:return: A timestamp string, or ``None`` if there is none.\n:rtype: bytes or NoneType", "id": "f4638:c7:m18"} {"signature": "def get_elliptic_curves():", "body": "return _EllipticCurve._get_elliptic_curves(_lib)", "docstring": "Return a set of objects representing the elliptic curves supported in the\nOpenSSL build in use.\n\nThe curve objects have a :py:class:`unicode` ``name`` attribute by which\nthey identify themselves.\n\nThe curve objects are useful as values for the argument accepted by\n:py:meth:`Context.set_tmp_ecdh` to specify which elliptical curve should be\nused for ECDHE key exchange.", "id": "f4638:m6"} {"signature": "def add_cert(self, cert):", "body": "if not isinstance(cert, X509):raise TypeError()if _lib.X509_STORE_add_cert(self._store, cert._x509) == :code = _lib.ERR_peek_error()err_reason = _lib.ERR_GET_REASON(code)_openssl_assert(err_reason == _lib.X509_R_CERT_ALREADY_IN_HASH_TABLE)_lib.ERR_clear_error()", "docstring": "Adds a trusted certificate to this store.\n\nAdding a certificate with this method adds this certificate as a\n*trusted* certificate.\n\n:param X509 cert: The certificate to add to this store.\n\n:raises TypeError: If the certificate is not an :class:`X509`.\n\n:raises OpenSSL.crypto.Error: If OpenSSL was unhappy with your\n certificate.\n\n:return: ``None`` if the certificate was added successfully.", "id": "f4638:c9:m1"} {"signature": "def get_data(self):", "body": "octet_result = _lib.X509_EXTENSION_get_data(self._extension)string_result = _ffi.cast('', octet_result)char_result = _lib.ASN1_STRING_data(string_result)result_length = _lib.ASN1_STRING_length(string_result)return _ffi.buffer(char_result, result_length)[:]", "docstring": "Returns the data of the X509 extension, encoded as ASN.1.\n\n:return: The ASN.1 encoded data of this X509 extension.\n:rtype: :py:data:`bytes`\n\n.. 
versionadded:: 0.12", "id": "f4638:c5:m6"} {"signature": "def get_components(self):", "body": "result = []for i in range(_lib.X509_NAME_entry_count(self._name)):ent = _lib.X509_NAME_get_entry(self._name, i)fname = _lib.X509_NAME_ENTRY_get_object(ent)fval = _lib.X509_NAME_ENTRY_get_data(ent)nid = _lib.OBJ_obj2nid(fname)name = _lib.OBJ_nid2sn(nid)value = _ffi.buffer(_lib.ASN1_STRING_data(fval),_lib.ASN1_STRING_length(fval))[:]result.append((_ffi.string(name), value))return result", "docstring": "Returns the components of this name, as a sequence of 2-tuples.\n\n:return: The components of this name.\n:rtype: :py:class:`list` of ``name, value`` tuples.", "id": "f4638:c4:m7"} {"signature": "def has_expired(self):", "body": "time_string = _native(self.get_notAfter())not_after = datetime.datetime.strptime(time_string, \"\")return not_after < datetime.datetime.utcnow()", "docstring": "Check whether the certificate has expired.\n\n:return: ``True`` if the certificate has expired, ``False`` otherwise.\n:rtype: bool", "id": "f4638:c7:m16"} {"signature": "def _exception_from_context(self):", "body": "errors = [_lib.X509_STORE_CTX_get_error(self._store_ctx),_lib.X509_STORE_CTX_get_error_depth(self._store_ctx),_native(_ffi.string(_lib.X509_verify_cert_error_string(_lib.X509_STORE_CTX_get_error(self._store_ctx)))),]_x509 = _lib.X509_STORE_CTX_get_current_cert(self._store_ctx)_cert = _lib.X509_dup(_x509)pycert = X509._from_raw_x509_ptr(_cert)return X509StoreContextError(errors, pycert)", "docstring": "Convert an OpenSSL native context error failure into a Python\nexception.\n\nWhen a call to native OpenSSL X509_verify_cert fails, additional\ninformation about the failure can be obtained from the store context.", "id": "f4638:c11:m3"} {"signature": "@classmethoddef from_cryptography(cls, crypto_cert):", "body": "if not isinstance(crypto_cert, x509.Certificate):raise TypeError(\"\")cert = cls()cert._x509 = crypto_cert._x509return cert", "docstring": "Construct based on a ``cryptography`` *crypto_cert*.\n\n:param crypto_key: A ``cryptography`` X.509 certificate.\n:type crypto_key: ``cryptography.x509.Certificate``\n\n:rtype: X509\n\n.. 
versionadded:: 17.1.0", "id": "f4638:c7:m3"} {"signature": "def bits(self):", "body": "return _lib.EVP_PKEY_bits(self._pkey)", "docstring": "Returns the number of bits of the key\n\n:return: The number of bits of the key.", "id": "f4638:c2:m6"} {"signature": "def load_privatekey(type, buffer, passphrase=None):", "body": "if isinstance(buffer, _text_type):buffer = buffer.encode(\"\")bio = _new_mem_buf(buffer)helper = _PassphraseHelper(type, passphrase)if type == FILETYPE_PEM:evp_pkey = _lib.PEM_read_bio_PrivateKey(bio, _ffi.NULL, helper.callback, helper.callback_args)helper.raise_if_problem()elif type == FILETYPE_ASN1:evp_pkey = _lib.d2i_PrivateKey_bio(bio, _ffi.NULL)else:raise ValueError(\"\")if evp_pkey == _ffi.NULL:_raise_current_error()pkey = PKey.__new__(PKey)pkey._pkey = _ffi.gc(evp_pkey, _lib.EVP_PKEY_free)return pkey", "docstring": "Load a private key (PKey) from the string *buffer* encoded with the type\n*type*.\n\n:param type: The file type (one of FILETYPE_PEM, FILETYPE_ASN1)\n:param buffer: The buffer the key is stored in\n:param passphrase: (optional) if encrypted PEM format, this can be\n either the passphrase to use, or a callback for\n providing the passphrase.\n\n:return: The PKey object", "id": "f4638:m13"} {"signature": "def get_extension(self, index):", "body": "ext = X509Extension.__new__(X509Extension)ext._extension = _lib.X509_get_ext(self._x509, index)if ext._extension == _ffi.NULL:raise IndexError(\"\")extension = _lib.X509_EXTENSION_dup(ext._extension)ext._extension = _ffi.gc(extension, _lib.X509_EXTENSION_free)return ext", "docstring": "Get a specific extension of the certificate by index.\n\nExtensions on a certificate are kept in order. The index\nparameter selects which extension will be returned.\n\n:param int index: The index of the extension to retrieve.\n:return: The extension at the specified index.\n:rtype: :py:class:`X509Extension`\n:raises IndexError: If the extension index was out of bounds.\n\n.. versionadded:: 0.12", "id": "f4638:c7:m31"} {"signature": "def get_privatekey(self):", "body": "return self._pkey", "docstring": "Get the private key in the PKCS #12 structure.\n\n:return: The private key, or :py:const:`None` if there is none.\n:rtype: :py:class:`PKey`", "id": "f4638:c15:m3"} {"signature": "def type_is_signedAndEnveloped(self):", "body": "return bool(_lib.PKCS7_type_is_signedAndEnveloped(self._pkcs7))", "docstring": "Check if this NID_pkcs7_signedAndEnveloped object\n\n:returns: True if the PKCS7 is of type signedAndEnveloped", "id": "f4638:c14:m2"} {"signature": "def set_nextUpdate(self, when):", "body": "return self._set_boundary_time(_lib.X509_CRL_get_nextUpdate, when)", "docstring": "Set when the CRL will next be udpated.\n\nThe timestamp is formatted as an ASN.1 TIME::\n\n YYYYMMDDhhmmssZ\n\n.. versionadded:: 16.1.0\n\n:param bytes when: A timestamp string.\n:return: ``None``", "id": "f4638:c13:m9"} {"signature": "def get_issuer(self):", "body": "_issuer = _lib.X509_NAME_dup(_lib.X509_CRL_get_issuer(self._crl))_openssl_assert(_issuer != _ffi.NULL)_issuer = _ffi.gc(_issuer, _lib.X509_NAME_free)issuer = X509Name.__new__(X509Name)issuer._name = _issuerreturn issuer", "docstring": "Get the CRL's issuer.\n\n.. 
versionadded:: 16.1.0\n\n:rtype: X509Name", "id": "f4638:c13:m5"} {"signature": "def gmtime_adj_notAfter(self, amount):", "body": "if not isinstance(amount, int):raise TypeError(\"\")notAfter = _lib.X509_get_notAfter(self._x509)_lib.X509_gmtime_adj(notAfter, amount)", "docstring": "Adjust the time stamp on which the certificate stops being valid.\n\n:param int amount: The number of seconds by which to adjust the\n timestamp.\n:return: ``None``", "id": "f4638:c7:m14"} {"signature": "def set_reason(self, reason):", "body": "if reason is None:self._delete_reason()elif not isinstance(reason, bytes):raise TypeError(\"\")else:reason = reason.lower().replace(b'', b'')reason_code = [r.lower() for r in self._crl_reasons].index(reason)new_reason_ext = _lib.ASN1_ENUMERATED_new()_openssl_assert(new_reason_ext != _ffi.NULL)new_reason_ext = _ffi.gc(new_reason_ext, _lib.ASN1_ENUMERATED_free)set_result = _lib.ASN1_ENUMERATED_set(new_reason_ext, reason_code)_openssl_assert(set_result != _ffi.NULL)self._delete_reason()add_result = _lib.X509_REVOKED_add1_ext_i2d(self._revoked, _lib.NID_crl_reason, new_reason_ext, , )_openssl_assert(add_result == )", "docstring": "Set the reason of this revocation.\n\nIf :data:`reason` is ``None``, delete the reason instead.\n\n:param reason: The reason string.\n:type reason: :class:`bytes` or :class:`NoneType`\n\n:return: ``None``\n\n.. seealso::\n\n :meth:`all_reasons`, which gives you a list of all supported\n reasons which you might pass to this method.", "id": "f4638:c12:m4"} {"signature": "def get_notAfter(self):", "body": "return self._get_boundary_time(_lib.X509_get_notAfter)", "docstring": "Get the timestamp at which the certificate stops being valid.\n\nThe timestamp is formatted as an ASN.1 TIME::\n\n YYYYMMDDhhmmssZ\n\n:return: A timestamp string, or ``None`` if there is none.\n:rtype: bytes or NoneType", "id": "f4638:c7:m21"} {"signature": "def get_pubkey(self):", "body": "pkey = PKey.__new__(PKey)pkey._pkey = _lib.X509_REQ_get_pubkey(self._req)_openssl_assert(pkey._pkey != _ffi.NULL)pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)pkey._only_public = Truereturn pkey", "docstring": "Get the public key of the certificate signing request.\n\n:return: The public key.\n:rtype: :py:class:`PKey`", "id": "f4638:c6:m4"} {"signature": "def set_pubkey(self, pkey):", "body": "set_result = _lib.X509_REQ_set_pubkey(self._req, pkey._pkey)_openssl_assert(set_result == )", "docstring": "Set the public key of the certificate signing request.\n\n:param pkey: The public key to use.\n:type pkey: :py:class:`PKey`\n\n:return: ``None``", "id": "f4638:c6:m3"} {"signature": "def all_reasons(self):", "body": "return self._crl_reasons[:]", "docstring": "Return a list of all the supported reason strings.\n\nThis list is a copy; modifying it does not change the supported reason\nstrings.\n\n:return: A list of reason strings.\n:rtype: :class:`list` of :class:`bytes`", "id": "f4638:c12:m6"} {"signature": "def get_ca_certificates(self):", "body": "if self._cacerts is not None:return tuple(self._cacerts)", "docstring": "Get the CA certificates in the PKCS #12 structure.\n\n:return: A tuple with the CA certificates in the chain, or\n :py:const:`None` if there are none.\n:rtype: :py:class:`tuple` of :py:class:`X509` or :py:const:`None`", "id": "f4638:c15:m5"} {"signature": "@classmethoddef from_nid(cls, lib, nid):", "body": "return cls(lib, nid, _ffi.string(lib.OBJ_nid2sn(nid)).decode(\"\"))", "docstring": "Instantiate a new :py:class:`_EllipticCurve` associated with the given\nOpenSSL 
NID.\n\n:param lib: The OpenSSL library binding object.\n\n:param nid: The OpenSSL NID the resulting curve object will represent.\n This must be a curve NID (and not, for example, a hash NID) or\n subsequent operations will fail in unpredictable ways.\n:type nid: :py:class:`int`\n\n:return: The curve object.", "id": "f4638:c3:m2"} {"signature": "def get_critical(self):", "body": "return _lib.X509_EXTENSION_get_critical(self._extension)", "docstring": "Returns the critical field of this X.509 extension.\n\n:return: The critical field.", "id": "f4638:c5:m4"} {"signature": "def get_serial_number(self):", "body": "asn1_serial = _lib.X509_get_serialNumber(self._x509)bignum_serial = _lib.ASN1_INTEGER_to_BN(asn1_serial, _ffi.NULL)try:hex_serial = _lib.BN_bn2hex(bignum_serial)try:hexstring_serial = _ffi.string(hex_serial)serial = int(hexstring_serial, )return serialfinally:_lib.OPENSSL_free(hex_serial)finally:_lib.BN_free(bignum_serial)", "docstring": "Return the serial number of this certificate.\n\n:return: The serial number.\n:rtype: int", "id": "f4638:c7:m13"} {"signature": "def to_cryptography(self):", "body": "from cryptography.hazmat.backends.openssl.x509 import (_CertificateRevocationList)backend = _get_backend()return _CertificateRevocationList(backend, self._crl)", "docstring": "Export as a ``cryptography`` CRL.\n\n:rtype: ``cryptography.x509.CertificateRevocationList``\n\n.. versionadded:: 17.1.0", "id": "f4638:c13:m1"} {"signature": "def set_friendlyname(self, name):", "body": "if name is None:self._friendlyname = Noneelif not isinstance(name, bytes):raise TypeError(\"\" % (name,))self._friendlyname = name", "docstring": "Set the friendly name in the PKCS #12 structure.\n\n:param name: The new friendly name, or :py:const:`None` to unset.\n:type name: :py:class:`bytes` or :py:const:`None`\n\n:return: ``None``", "id": "f4638:c15:m7"} {"signature": "def add(buffer, entropy):", "body": "if not isinstance(buffer, bytes):raise TypeError(\"\")if not isinstance(entropy, int):raise TypeError(\"\")_lib.RAND_add(buffer, len(buffer), entropy)", "docstring": "Mix bytes from *string* into the PRNG state.\n\nThe *entropy* argument is (the lower bound of) an estimate of how much\nrandomness is contained in *string*, measured in bytes.\n\nFor more information, see e.g. 
:rfc:`1750`.\n\nThis function is only relevant if you are forking Python processes and\nneed to reseed the CSPRNG after fork.\n\n:param buffer: Buffer with random data.\n:param entropy: The entropy (in bytes) measurement of the buffer.\n\n:return: :obj:`None`", "id": "f4639:m0"} {"signature": "def transform(self, X):", "body": "return X[:, self.get_support()]", "docstring": "Transform to select not k best feature\n:param X: np.matrix", "id": "f4645:c0:m2"} {"signature": "def to_csv(self, X, y):", "body": "raise NotImplementedError", "docstring": "Writes dataset to csv.", "id": "f4648:c0:m2"} {"signature": "def from_csv(self, label_column=''):", "body": "df = pd.read_csv(self.path, header=)X = df.loc[:, df.columns != label_column].to_dict('')X = map_dict_list(X, if_func=lambda k, v: v and math.isfinite(v))y = list(df[label_column].values)return X, y", "docstring": "Read dataset from csv.", "id": "f4648:c0:m1"} {"signature": "def feature_importance_report(X,y,threshold=,correcting_multiple_hypotesis=True,method='',alpha=,sort_by=''):", "body": "df = variance_threshold_on_df(pd.DataFrame.from_records(X), threshold=threshold)F, pvals = f_classif(df.values, y)if correcting_multiple_hypotesis:_, pvals, _, _ = multipletests(pvals, alpha=alpha, method=method)df[''] = ydf_mean = df.groupby('').mean().Tdf_mean[''] = Fdf_mean[''] = pvalsreturn df_mean.sort_values(sort_by, ascending=True)", "docstring": "Provide significance for features in dataset with ANOVA using multiple hypothesis testing\n\n:param X: List of dict with key as feature names and values as features\n:param y: Labels\n:param threshold: Low-variance threshold to eliminate low-variance features\n:param correcting_multiple_hypotesis: corrects p-val with multiple hypothesis testing\n:param method: method of multiple hypothesis testing\n:param alpha: alpha of multiple hypothesis testing\n:param sort_by: sorts output dataframe by pval or F\n:return: DataFrame with F and pval for each feature with their average values", "id": "f4651:m6"} {"signature": "def average_by_label(X, y, ref_label):", "body": "return defaultdict(float,pd.DataFrame.from_records(filter_by_label(X, y, ref_label)[]).mean().to_dict())", "docstring": "Calculates an average dictionary from a list of dictionaries for the given label\n\n:param List[Dict] X: dataset\n:param list y: labels\n:param ref_label: reference label", "id": "f4651:m1"} {"signature": "def __init__(self, names, case_sensetive=False):", "body": "self.names = names if case_sensetive else map_dict(names, key_func=lambda k, v: k.lower())", "docstring": ":names: dict which contains old feature names as keys and new names as values. 
\n:case_insensetive: performs mactching case sensetive", "id": "f4656:c0:m0"} {"signature": "def __init__(self, reference_label):", "body": "super().__init__()self.reference_label = reference_label", "docstring": ":reference_label: the label scaling will be performed by.", "id": "f4660:c0:m0"} {"signature": "def partial_fit(self, X, y):", "body": "X, y = filter_by_label(X, y, self.reference_label)super().partial_fit(X, y)return self", "docstring": ":X: {array-like, sparse matrix}, shape [n_samples, n_features]\n The data used to compute the mean and standard deviation\n used for later scaling along the features axis.\n:y: Healthy 'h' or 'sick_name'", "id": "f4660:c0:m1"} {"signature": "def get_status(options):", "body": "payload = { \"\": options.username,\"\": options.password,\"\": options.server,\"\": options.port,}try:if options.server.startswith(\"\") and stat.S_ISSOCK(os.stat(options.server).st_mode): try:import supervisor.xmlrpcexcept ImportError as error:sys.stderr.write(\"\".format(error=error))sys.stderr.write(\"\")sys.exit(-)if all([options.username, options.password, ]): connection = xmlrpclib.ServerProxy(\"\", transport=supervisor.xmlrpc.SupervisorTransport(options.username, options.password, serverurl=URI[URI_TPL_SOCKET].format(**payload)))else:connection = xmlrpclib.ServerProxy(\"\", transport=supervisor.xmlrpc.SupervisorTransport(None, None, serverurl=URI[URI_TPL_SOCKET].format(**payload)))else: if all([options.username, options.password, ]): connection = xmlrpclib.Server(URI[URI_TPL_HTTP_AUTH].format(**payload))else:connection = xmlrpclib.Server(URI[URI_TPL_HTTP].format(**payload))return connection.supervisor.getAllProcessInfo()except Exception as error:if not options.quiet:sys.stdout.write(\"\".format(error=error))sys.exit(EXIT_CODES.get(options.network_errors_exit_code, EXIT_CODE_UNKNOWN))", "docstring": "Get programs statuses.\n\n:param options: parsed commandline arguments.\n:type options: optparse.Values.\n:return: supervisord XML-RPC call result.\n:rtype: dict.", "id": "f4668:m1"} {"signature": "def main():", "body": "options = parse_options()output, code = create_output(get_status(options), options)sys.stdout.write(output)sys.exit(code)", "docstring": "Program main.", "id": "f4668:m3"} {"signature": "def only_module(*modules):", "body": "modules = (modules and isinstance(modules[], list)) andmodules[] or modulesfor module in modules:if not module in ONLY_MODULES:ONLY_MODULES.append(module)traceback.extract_tb = _new_extract_tb", "docstring": "This will exclude all modules from the traceback except these \"modules\"\n:param modules: list of modules to report in traceback\n:return: None", "id": "f4672:m1"} {"signature": "def add_handler(log_handler_level, handler, formatter=None, log_filter=None):", "body": "handler.setLevel(log_handler_level)if formatter is not None:handler.setFormatter(formatter)if log_filter is not None:handler.addFilter(log_filter)log.addHandler(handler)", "docstring": ":param log_handler_level: str of the level to set for the handler\n:param handler: logging.Handler handler to add\n:param formatter: logging.Formatter instance to use\n:param log_filter: logging.filter instance to add to handler\n:return: None", "id": "f4673:m5"} {"signature": "def setup_file_logging(log_filename, log_file_level=\"\", str_format=None,date_format=None, log_restart=False, log_history=False,formatter=None, silence_modules=None, log_filter=None):", "body": "from seaborn_timestamp.timestamp import datetime_to_strif os.path.exists(log_filename) and 
log_restart:os.remove(log_filename)add_file_handler(log_file_level, log_filename, str_format=str_format,date_format=date_format, formatter=formatter,log_filter=log_filter)if log_history:base_name = os.path.basename(log_filename).split('')[] +'' % datetime_to_str(str_format='')history_log = os.path.join(os.path.dirname(log_filename),'', base_name + '')add_file_handler(log_file_level, history_log, str_format=str_format,date_format=date_format, log_filter=log_filter)silence_module_logging(silence_modules)", "docstring": "This will setup logging for a single file but can be called more than once\nLOG LEVELS are \"CRITICAL\", \"ERROR\", \"INFO\", \"DEBUG\"\n:param log_filename: str of the file location\n:param log_file_level str of the log level to use on this file\n:param str_format: str of the logging format\n:param date_format: str of the date format\n:param log_restart: bool if True the log file will be deleted first\n:param log_history: bool if True will save another log file in a folder\n called history with the datetime\n:param formatter: logging.Format instance to use\n:param log_filter: logging.filter instance to add to handler\n:param silence_modules list of str of modules to silence\n:return: None", "id": "f4673:m3"} {"signature": "def _prune_dict_null_str(dictionary):", "body": "for key, value in list(dictionary.items()):if value is None or str(value) == '':del dictionary[key]if isinstance(value, dict):dictionary[key] = _prune_dict_null_str(dictionary[key])return dictionary", "docstring": "Prune the \"None\" or emptry string values from dictionary items", "id": "f4682:m5"} {"signature": "def create_tfs_git_client(url, token=None):", "body": "if token is None:token = os.environ.get('', None)tfs_connection = create_tfs_connection(url, token)tfs_git_client = tfs_connection.get_client('')if tfs_git_client is None:msg = ''raise RuntimeError(msg, url)return tfs_git_client", "docstring": "Creates a TFS Git Client to pull Git repo info", "id": "f4684:m4"} {"signature": "def process_url(url, key):", "body": "logger.debug('', url)if key is None:raise ValueError('')response = requests.get(url, headers={\"\": \"\" + key})doecode_json = response.json()for record in doecode_json['']:yield record", "docstring": "Yields DOE CODE records from a DOE CODE .json URL response\nConverts a DOE CODE API .json URL response into DOE CODE projects", "id": "f4687:m1"} {"signature": "def process_json(filename):", "body": "logger.debug('', filename)doecode_json = json.load(open(filename))for record in doecode_json['']:yield record", "docstring": "Converts a DOE CODE .json file into DOE CODE projects\nYields DOE CODE records from a DOE CODE .json file", "id": "f4687:m0"} {"signature": "@classmethoddef from_stashy(klass, repository, labor_hours=True):", "body": "if not isinstance(repository, dict):raise TypeError('')project = klass()logger.debug('',repository[''],repository[''][''],)project[''] = repository['']clone_urls = [clone[''] for clone in repository['']['']]for url in clone_urls:if url.startswith(''):project[''] = urlbreakdescription = repository[''].get('', '')if description:project[''] = '' % descriptionproject[''][''] = Noneweb_url = repository[''][''][]['']public_server = web_url.startswith('')if repository[''] and public_server:project[''][''] = ''if labor_hours:project[''] = labor_hours_from_url(project[''])else:project[''] = project[''] = ['']project[''][''] = ''project[''][''] = repository[''][''][]['']project[''] = ''project[''] = repository['']project[''] = 
repository[''][''][]['']_prune_dict_null_str(project)return project", "docstring": "Handles crafting Code.gov Project for Bitbucket Server repositories", "id": "f4689:c1:m3"} {"signature": "@classmethoddef from_doecode(klass, record):", "body": "if not isinstance(record, dict):raise TypeError('')project = klass()project[''] = record['']logger.debug('', record[''])link = record.get('', '')if not link:link = record.get('')logger.warning('', link)project[''] = linkproject[''] = record['']licenses = set(record[''])licenses.discard(None)logger.debug('', licenses)license_objects = []if '' in licenses:licenses.remove('')license_objects = [{'': '','': record['']}]if licenses:license_objects.extend([_license_obj(license) for license in licenses])project[''][''] = license_objectsif record['']:usage_type = ''else:usage_type = ''project[''][''] = ''project[''][''] = usage_typeproject[''] = project[''] = ['']lab_name = record.get('')if lab_name is not None:project[''].append(lab_name)project[''][''] = record['']if '' in record and record['']:project[''] = record['']if lab_name is not None:project[''] = lab_namestatus = record.get('')if status is None:raise ValueError('')elif status:status = ''else:status = ''project[''] = statusvcs = Nonelink = project['']if '' in link:vcs = ''if vcs is None:logger.debug('', project[''], link)vcs = ''if vcs:project[''] = vcsurl = record.get('', '')if url:project[''] = urlif '' in record:project[''] = record['']if '' in record and '' in record:project[''] = {'': record[''],'': record['']}return project", "docstring": "Create CodeGovProject object from DOE CODE record\n\nHandles crafting Code.gov Project", "id": "f4689:c1:m4"} {"signature": "@classmethoddef from_gitlab(klass, repository, labor_hours=True):", "body": "if not isinstance(repository, gitlab.v4.objects.Project):raise TypeError('')project = klass()logger.debug('',repository.id,repository.path_with_namespace,)project[''] = repository.nameproject[''] = repository.http_url_to_repoproject[''] = repository.descriptionproject[''][''] = Noneweb_url = repository.web_urlpublic_server = web_url.startswith('')if repository.visibility in ('') and public_server:project[''][''] = ''elif date_parse(repository.created_at) < POLICY_START_DATE:project[''][''] = ''if labor_hours:project[''] = labor_hours_from_url(project[''])else:project[''] = project[''] = [''] + repository.tag_listproject[''] = {'': '','': web_url,}project[''] = repository.namespace['']project[''] = ''project[''] = ''project[''] = repository.web_urlapi_url = repository.manager.gitlab._urlarchive_suffix = '' % repository.get_id()project[''] = api_url + archive_suffixproject[''] = {'': date_parse(repository.created_at).date().isoformat(),'': date_parse(repository.last_activity_at).date().isoformat(),'': '',}_prune_dict_null_str(project)return project", "docstring": "Create CodeGovProject object from GitLab Repository", "id": "f4689:c1:m2"} {"signature": "def _license_obj(license):", "body": "obj = Noneif license in ('', ''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': ''}elif license in (''):obj = {'': '','': '',}elif license in (''):obj = {'': '','': '',}elif license in (''):obj = {'': '','': '',}elif license in (''):obj = {'': '','': '',}elif license in 
(''):obj = {'': '','': '',}if obj is None:logger.warn('', license)raise ValueError('')return obj", "docstring": "A helper function to look up license object information\n\nUse names from: https://api.github.com/licenses", "id": "f4690:m0"} {"signature": "def queryGitHub(self, gitquery, gitvars={}, verbosity=, paginate=False, cursorVar=None, keysToList=[], rest=False, requestCount=, pageNum=):", "body": "requestCount += if pageNum < : pageNum = pageNum += if paginate:_vPrint((verbosity >= ), \"\" % (pageNum))_vPrint((verbosity >= ), \"\" % (\"\" if rest else \"\"))response = self._submitQuery(gitquery, gitvars=gitvars, verbose=(verbosity > ), rest=rest)_vPrint((verbosity >= ), \"\")_vPrint((verbosity >= ), response[\"\"][\"\"])statusNum = response[\"\"]pageNum -= try:apiStatus = {\"\": int(response[\"\"][\"\"]),\"\": int(response[\"\"][\"\"]),\"\": int(response[\"\"][\"\"])}_vPrint((verbosity >= ), \"\" % (json.dumps(apiStatus)))if not apiStatus[\"\"] > :_vPrint((verbosity >= ), \"\")self._awaitReset(apiStatus[\"\"])_vPrint((verbosity >= ), \"\")return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=(requestCount - ), pageNum=pageNum)except KeyError:_vPrint((verbosity >= ), \"\")if statusNum == :if requestCount >= self.maxRetry:raise RuntimeError(\"\" % (self.maxRetry, response[\"\"][\"\"], response[\"\"]))else:self._countdown(self.__retryDelay, printString=\"\", verbose=(verbosity >= ))return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum)if statusNum == or statusNum == :if requestCount >= self.maxRetry:raise RuntimeError(\"\" % (self.maxRetry, response[\"\"][\"\"], response[\"\"]))else:self._countdown(self.__retryDelay, printString=\"\", verbose=(verbosity >= ))return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum)if statusNum >= or statusNum == :raise RuntimeError(\"\" % (response[\"\"][\"\"], response[\"\"]))_vPrint((verbosity >= ), \"\")outObj = json.loads(response[\"\"])if not rest and \"\" in outObj:if requestCount >= self.maxRetry:raise RuntimeError(\"\" % (self.maxRetry, response[\"\"][\"\"], response[\"\"]))elif len(outObj[\"\"]) == and len(outObj[\"\"][]) == :_vPrint((verbosity >= ), \"\" % (json.dumps(outObj[\"\"])))self._countdown(self.__retryDelay, printString=\"\", verbose=(verbosity >= ))return self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=requestCount, pageNum=pageNum)else:raise RuntimeError(\"\" % (json.dumps(outObj[\"\"])))pageNum += if paginate:if rest and response[\"\"]:if \"\" in response[\"\"]:nextObj = self.queryGitHub(response[\"\"][\"\"], gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=, pageNum=pageNum)outObj.extend(nextObj)elif not rest:if not cursorVar:raise ValueError(\"\")if not len(keysToList) > :raise ValueError(\"\")aPage = outObjfor key in keysToList[:-]:aPage = aPage[key]gitvars[cursorVar] = aPage[\"\"][\"\"]if aPage[\"\"][\"\"]:nextObj = self.queryGitHub(gitquery, gitvars=gitvars, verbosity=verbosity, paginate=paginate, cursorVar=cursorVar, keysToList=keysToList, rest=rest, requestCount=, 
pageNum=pageNum)newPage = nextObjfor key in keysToList[:-]:newPage = newPage[key]aPage[keysToList[-]].extend(newPage[keysToList[-]])aPage.pop(\"\", None)return outObj", "docstring": "Submit a GitHub query.\n\n Args:\n gitquery (str): The query or endpoint itself.\n Examples:\n query: 'query { viewer { login } }'\n endpoint: '/user'\n gitvars (Optional[Dict]): All query variables.\n Defaults to empty.\n GraphQL Only.\n verbosity (Optional[int]): Changes output verbosity levels.\n If < 0, all extra printouts are suppressed.\n If == 0, normal print statements are displayed.\n If > 0, additional status print statements are displayed.\n Defaults to 0.\n paginate (Optional[bool]): Pagination will be completed\n automatically if True. Defaults to False.\n cursorVar (Optional[str]): Key in 'gitvars' that represents the\n pagination cursor. Defaults to None.\n GraphQL Only.\n keysToList (Optional[List[str]]): Ordered list of keys needed to\n retrieve the list in the query results to be extended by\n pagination. Defaults to empty.\n Example:\n ['data', 'viewer', 'repositories', 'nodes']\n GraphQL Only.\n rest (Optional[bool]): If True, uses the REST API instead\n of GraphQL. Defaults to False.\n requestCount (Optional[int]): Counter for repeated requests.\n pageNum (Optional[int]): Counter for pagination.\n For user readable log messages only, does not affect data.\n\n Returns:\n Dict: A JSON style dictionary.", "id": "f4691:c0:m5"} {"signature": "def __init__(self, filePath=None, loadData=False):", "body": "self.data = {}\"\"\"\"\"\"self.filePath = filePathif loadData:self.fileLoad(updatePath=False)", "docstring": "Initialize the DataManager object.\n Args:\n filePath (Optional[str]): Relative or absolute path to a JSON\n data file. Defaults to None.\n loadData (Optional[bool]): Loads data from the given file path\n if True. Defaults to False.", "id": "f4691:c1:m0"} {"signature": "def __init__(self, apiToken=None, maxRetry=):", "body": "if apiToken:self.__githubApiToken = apiTokenelse:try:self.__githubApiToken = os.environ['']except KeyError as error:raise TypeError(\"\") from errorprint(\"\", end=\"\", flush=True)basicCheck = self._submitQuery('')if basicCheck[\"\"] == :print(\"\")raise ValueError(\"\" + basicCheck[\"\"][\"\"] + \"\" + basicCheck[\"\"])else:print(\"\")self.__retryDelay = self.__query = None self.__queryPath = None self.__queryTimestamp = None self.maxRetry = maxRetryself.data = {}\"\"\"\"\"\"", "docstring": "Initialize the GitHubQueryManager object.\n\n Note:\n If no apiToken argument is provided,\n the environment variable 'GITHUB_API_TOKEN' must be set.\n\n Args:\n apiToken (Optional[str]): A string representing a GitHub API\n token. Defaults to None.\n maxRetry (Optional[int]): A limit on how many times to\n automatically retry requests. 
Defaults to 10.\n\n Raises:\n TypeError: If no GitHub API token is provided either via\n argument or environment variable 'GITHUB_API_TOKEN'.", "id": "f4691:c0:m0"} {"signature": "@propertydef maxRetry(self):", "body": "return self.__maxRetry", "docstring": "int: A limit on how many times to automatically retry requests.\n\n Must be a whole integer greater than 0.", "id": "f4691:c0:m1"} {"signature": "def create_session(token=None):", "body": "if token is None:token = os.environ.get('', None)gh_session = github3.login(token=token)if gh_session is None:raise RuntimeError('')return gh_session", "docstring": "Create a github3.py session connected to GitHub.com\n\nIf token is not provided, will attempt to use the GITHUB_API_TOKEN\nenvironment variable if present.", "id": "f4692:m1"} {"signature": "def connect(url='', token=None):", "body": "gh_session = Noneif url == '':gh_session = create_session(token)else:gh_session = create_enterprise_session(url, token)if gh_session is None:msg = ''raise RuntimeError(msg, url)logger.info('', url)return gh_session", "docstring": "Create a GitHub session for making requests", "id": "f4692:m5"} {"signature": "def connect(url, username, password):", "body": "bb_session = stashy.connect(url, username, password)logger.info('', url, username)return bb_session", "docstring": "Return a connected Bitbucket session", "id": "f4693:m0"} {"signature": "def all_repos(bb_session):", "body": "for repo in bb_session.repos.all():yield repo", "docstring": "Yields Stashy repo objects for all projects in Bitbucket", "id": "f4693:m1"} {"signature": "def prompt_2fa(self):", "body": "code = ''while not code:code = input('')return code", "docstring": "Taken from\nhttp://github3py.readthedocs.io/en/master/examples/two_factor_auth.html\nPrompts a user for their 2FA code and returns it.", "id": "f4696:c0:m3"} {"signature": "def write_repo_json(self, date=(datetime.date.today()),organization='', dict_to_write={}, path_ending_type='',is_list=False, is_dict=False):", "body": "for repo in dict_to_write:path = ('' + organization + '' + repo + '' +path_ending_type + '' + str(date) + '')self.checkDir(path)with open(path, '') as out:if is_list:out.write('')for value in dict_to_write[repo]:if is_dict:for inner_dict in value:out.write(json.dumps(inner_dict, sort_keys=True,indent=, separators=('', '')) + '')else:out.write(json.dumps(value, sort_keys=True,indent=, separators=('', '')) + '')out.seek(-, os.SEEK_END)out.truncate()out.write('')else:out.write(json.dumps(dict_to_write[repo], sort_keys=True,indent=, separators=('', '')))out.close()", "docstring": "#Writes repo specific data to JSON.", "id": "f4697:c0:m16"} {"signature": "def get_languages(self, repo, temp_repo):", "body": "try:self.languages[repo.language] += except KeyError:count = self.languages[repo.language] = for repo_languages in repo.iter_languages():self.languages_json[repo.name][repo_languages[]] = repo_languages[]for language in repo_languages:if isinstance(language, str):temp_repo.languages.append(language)self.previous_language = languageelse:try:self.languages_size[self.previous_language] +=languageexcept KeyError:size = self.languages_size[self.previous_language]= language", "docstring": "Retrieves the languages used in the repo and increments the respective\ncounts of those languages. Only increments languages that have names.\nAnything else is not incremented (i.e. 
numbers).", "id": "f4697:c0:m11"} {"signature": "def get_mems_of_org(self):", "body": "print('')counter = for member in self.org_retrieved.iter_members():self.members_json[member.id] = member.to_json()counter += return counter", "docstring": "Retrieves the number of members of the organization.", "id": "f4697:c0:m5"} {"signature": "def get_total_contributors(self, repo):", "body": "repo_contributors = for contributor in repo.iter_contributors():repo_contributors += self.unique_contributors[contributor.id].append(repo.name)self.contributors_json[repo.name].append(contributor.to_json())return repo_contributors", "docstring": "Retrieves the number of contributors to a repo in the organization.\nAlso adds to unique contributor list.", "id": "f4697:c0:m8"} {"signature": "def get_readme(self, repo):", "body": "readme_contents = repo.readme()if readme_contents is not None:self.total_readmes += return ''if self.search_limit >= :print('')time.sleep()self.search_limit = self.search_limit += search_results = self.logged_in_gh.search_code(''+ '' + repo.full_name)try:for result in search_results:path = result.path[:]if '' not in path and '' in path.lower():self.total_readmes += return pathreturn ''except (github3.models.GitHubError, StopIteration) as e:return ''", "docstring": "Checks to see if the given repo has a ReadMe. MD means it has a correct\nReadme recognized by GitHub.", "id": "f4697:c0:m12"} {"signature": "def write_languages(self, file_path='',date=str(datetime.date.today())):", "body": "self.remove_date(file_path=file_path, date=date)languages_exists = os.path.isfile(file_path)with open(file_path, '') as out_languages:if not languages_exists:out_languages.write('')languages_sorted = sorted(self.languages_size)for language in languages_sorted:try:out_languages.write(date + '' + language + ''+ str(self.languages[language]) + ''+ str(self.languages_size[language]) + ''+ str(math.log10(int(self.languages_size[language])))+ '')except (TypeError, KeyError) as e:out_languages.write(date + '' + language + ''+ str() + ''+ str(self.languages_size[language]) + ''+ str(math.log10(int(self.languages_size[language])))+ '')", "docstring": "Updates languages.csv file with current data.", "id": "f4697:c0:m19"} {"signature": "def get_commits(self, repo, organization=''):", "body": "path = ('' + organization + '' + repo.name + '')is_only_today = Falseif not os.path.exists(path): all_commits = repo.iter_commits()is_only_today = Trueelse:files = os.listdir(path)date = str(files[-][:-])if date == str(datetime.date.today()):if len(files) > :date = str(files[-][:-])else:all_commits = repo.iter_commits()is_only_today = Trueif not is_only_today:all_commits = repo.iter_commits(since=date)for commit in all_commits:self.commits_json[repo.name].append(commit.to_json())count = for commit in repo.iter_commits():count += return count", "docstring": "Retrieves the number of commits to a repo in the organization. If it is\nthe first time getting commits for a repo, it will get all commits and\nsave them to JSON. If there are previous commits saved, it will only get\ncommits that have not been saved to disk since the last date of commits.", "id": "f4697:c0:m14"} {"signature": "def get_org(self, organization_name=''):", "body": "self.organization_name = organization_nameif(organization_name == ''):self.organization_name = input('')print('')self.org_retrieved = self.logged_in_gh.organization(organization_name)", "docstring": "Retrieves an organization via given org name. 
If given\nempty string, prompts user for an org name.", "id": "f4698:c0:m4"} {"signature": "def write_json(self, date=(datetime.date.today()),organization='',dict_to_write={}, path_ending_type=''):", "body": "for repo in dict_to_write:if len(dict_to_write[repo]) != :path = ('' + organization + '' + repo + '' +path_ending_type + '' + str(date) + '')self.checkDir(path)with open(path, '') as out:out.write(json.dumps(dict_to_write[repo], sort_keys=True,indent=, separators=('', '')))out.close()", "docstring": "Writes all traffic data to file in JSON form.", "id": "f4698:c0:m10"} {"signature": "def check_data_redundancy(self, file_path='', dict_to_check={}):", "body": "count = exists = os.path.isfile(file_path)previous_dates = {}if exists:with open(file_path, '') as input:input.readline()for row in csv.reader(input):timestamp = calendar.timegm(time.strptime(row[],''))if timestamp in dict_to_check:del dict_to_check[timestamp]count += input.close()return count", "docstring": "Checks the given csv file against the json data scraped for the given\ndict. It will remove all data retrieved that has already been recorded\nso we don't write redundant data to file. Returns count of rows from\nfile.", "id": "f4698:c0:m12"} {"signature": "def get_year_commits(self, username='', password='', organization='', force=True):", "body": "date = str(datetime.date.today())file_path = ('')if force or not os.path.isfile(file_path):my_github.login(username, password)calls_beginning = self.logged_in_gh.ratelimit_remaining + print('' + str(calls_beginning))my_github.get_org(organization)my_github.repos(building_stats=True)print(\"\")time.sleep()print(\"\")my_github.repos(building_stats=False)my_github.calc_total_commits(starting_commits=)my_github.write_to_file()calls_remaining = self.logged_in_gh.ratelimit_remainingcalls_used = calls_beginning - calls_remainingprint(('' + str(calls_remaining) + ''+ str(calls_used) + ''))", "docstring": "Does setup such as login, printing API info, and waiting for GitHub to\nbuild the commit statistics. Then gets the last year of commits and\nprints them to file.", "id": "f4699:c0:m1"} {"signature": "def repos(self, building_stats=False):", "body": "print('')for repo in self.org_retrieved.iter_repos():for activity in repo.iter_commit_activity():if not building_stats:self.commits_dict_list.append(activity)", "docstring": "Retrieves the last year of commits for the organization and stores them\nin weeks (UNIX time) associated with number of commits that week.", "id": "f4699:c0:m5"} {"signature": "def get_org(self, organization_name=''):", "body": "if(organization_name == ''):organization_name = input('')print('')self.org_retrieved = self.logged_in_gh.organization(organization_name)", "docstring": "Retrieves an organization via given org name. 
If given\nempty string, prompts user for an org name.", "id": "f4699:c0:m4"} {"signature": "def get_stargazers(self, url, headers={}):", "body": "url = url + ''page = gazers = []json_data = requests.get(url % page, headers=headers).json()while json_data:gazers.extend(json_data)page += json_data = requests.get(url % page, headers=headers).json()return gazers", "docstring": "Return a list of the stargazers of a GitHub repo\n\nIncludes both the 'starred_at' and 'user' data.\n\n:param url:\n url is the 'stargazers_url' of the form:\n https://api.github.com/repos/LLNL/spack/stargazers", "id": "f4702:c0:m6"} {"signature": "def setup_module(module):", "body": "logger.debug('')pass", "docstring": "Setup for the owc test module", "id": "f4708:m2"} {"signature": "def scratch_file(filename):", "body": "return os.path.join(scratch_directory(), filename)", "docstring": "Helper function to return file path in the tests scratch directory", "id": "f4736:m4"} {"signature": "def cast_tuple_int_list(tup):", "body": "return [int(a) for a in tup]", "docstring": "Set tuple float values to int for more predictable test results", "id": "f4736:m7"} {"signature": "def cleanup_namespaces(element):", "body": "if etree.__name__ == '':etree.cleanup_namespaces(element)return elementelse:return etree.fromstring(etree.tostring(element))", "docstring": "Remove unused namespaces from an element", "id": "f4746:m5"} {"signature": "def xmlvalid(xml, xsd):", "body": "xsd1 = etree.parse(xsd)xsd2 = etree.XMLSchema(xsd1)doc = etree.parse(StringIO(xml))return xsd2.validate(doc)", "docstring": "Test whether an XML document is valid\n\nParameters\n----------\n\n- xml: XML content\n- xsd: pointer to XML Schema (local file path or URL)", "id": "f4746:m13"} {"signature": "def xml2string(xml):", "body": "warnings.warn(\"\")return '' + xml", "docstring": "Return a string of an XML object\n\nParameters\n----------\n\n- xml: xml string", "id": "f4746:m12"} {"signature": "def build_get_url(base_url, params, overwrite=False):", "body": "qs_base = []if base_url.find('') != -:qs_base = cgi.parse_qsl(base_url.split('')[])qs_params = []for key, value in six.iteritems(params):qs_params.append((key, value))qs = qs_add = []if overwrite is True:qs = qs_paramsqs_add = qs_baseelse:qs = qs_baseqs_add = qs_paramspars = [x[] for x in qs]for key, value in qs_add:if key not in pars:qs.append((key, value))urlqs = urlencode(tuple(qs))return base_url.split('')[] + '' + urlqs", "docstring": "Utility function to build a full HTTP GET URL from the service base URL and a dictionary of HTTP parameters.\n\n TODO: handle parameters case-insensitive?\n\n @param overwrite: boolean flag to allow overwrite of parameters of the base_url (default: False)", "id": "f4746:m16"} {"signature": "def getTypedValue(data_type, value):", "body": "if data_type == '':return True if value.lower() == '' else Falseelif data_type == '':return int(value)elif data_type == '':return float(value)elif data_type == '':return str(value)else:return value", "docstring": "Utility function to cast a string value to the appropriate XSD type.", "id": "f4746:m18"} {"signature": "def extract_xml_list(elements):", "body": "keywords = (re.split(r'',f.text) for f in elements if f.text)flattened = (item.strip() for sublist in keywords for item in sublist)remove_blank = [_f for _f in flattened if _f]return remove_blank", "docstring": "Some people don't have separate tags for their keywords and separate them with\na newline. 
This will extract out all of the keywords correctly.", "id": "f4746:m20"} {"signature": "def encode_string(text):", "body": "if six.PY3:return textif isinstance(text, str):return text.decode('').encode('', '')return text.encode('', '')", "docstring": "On Python 3 this method does nothing and returns the ``text`` string itself.\nOn Python 2 this method returns the ``text`` string encoded with UTF-8.\n\nSee:\n* https://pythonhosted.org/six/#six.python_2_unicode_compatible\n* https://www.azavea.com/blog/2014/03/24/solving-unicode-problems-in-python-2-7/", "id": "f4746:m29"} {"signature": "def datetime_from_ansi(ansi):", "body": "datumOrigin = datetime(,,,,,)return datumOrigin + timedelta(ansi)", "docstring": "Converts an ansiDate (expressed as a number = the nuber of days since the datum origin of ansi) to a python datetime object.", "id": "f4746:m27"} {"signature": "def which_etree():", "body": "which_etree = Noneif '' in etree.__file__:which_etree = ''elif '' in etree.__file__:which_etree = ''elif '' in etree.__file__:which_etree = ''return which_etree", "docstring": "decipher which etree library is being used by OWSLib", "id": "f4746:m24"} {"signature": "def getService_urls(self, service_string=None):", "body": "urls=[]for key,rec in six.iteritems(self.records):url = next((d[''] for d in rec.references if d[''] == service_string), None)if url is not None:urls.append(url)return urls", "docstring": "Return easily identifiable URLs for all service types\n\nParameters\n----------\n\n- service_string: a URI to lookup", "id": "f4748:c0:m9"} {"signature": "def getrecords(self, qtype=None, keywords=[], typenames='', propertyname='', bbox=None, esn='', sortby=None, outputschema=namespaces[''], format=outputformat, startposition=, maxrecords=, cql=None, xml=None, resulttype=''):", "body": "warnings.warn(\"\"\"\"\"\")if xml is not None:self.request = etree.fromstring(xml)val = self.request.find(util.nspath_eval('', namespaces))if val is not None:esn = util.testXMLValue(val)else:node0 = self._setrootelement('')if etree.__name__ != '': node0.set('', namespaces[''])node0.set('', namespaces[''])node0.set('', namespaces[''])node0.set('', namespaces[''])node0.set('', outputschema)node0.set('', format)node0.set('', self.version)node0.set('', resulttype)node0.set('', self.service)if startposition > :node0.set('', str(startposition))node0.set('', str(maxrecords))node0.set(util.nspath_eval('', namespaces), schema_location)node1 = etree.SubElement(node0, util.nspath_eval('', namespaces))node1.set('', typenames)etree.SubElement(node1, util.nspath_eval('', namespaces)).text = esnself._setconstraint(node1, qtype, propertyname, keywords, bbox, cql, None)if sortby is not None:fes.setsortby(node1, sortby)self.request = node0self._invoke()if self.exceptionreport is None:self.results = {}val = self._exml.find(util.nspath_eval('', namespaces)).attrib.get('')self.results[''] = int(util.testXMLValue(val, True))val = self._exml.find(util.nspath_eval('', namespaces)).attrib.get('')self.results[''] = int(util.testXMLValue(val, True))val = self._exml.find(util.nspath_eval('', namespaces)).attrib.get('')self.results[''] = int(util.testXMLValue(val, True))self.records = OrderedDict()self._parserecords(outputschema, esn)", "docstring": "Construct and process a GetRecords request\n\nParameters\n----------\n\n- qtype: type of resource to query (i.e. 
service, dataset)\n- keywords: list of keywords\n- typenames: the typeNames to query against (default is csw:Record)\n- propertyname: the PropertyName to Filter against \n- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]\n- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'summary')\n- sortby: property to sort results on\n- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')\n- format: the outputFormat (default is 'application/xml')\n- startposition: requests a slice of the result set, starting at this position (default is 0)\n- maxrecords: the maximum number of records to return. No records are returned if 0 (default is 10)\n- cql: common query language text. Note this overrides bbox, qtype, keywords\n- xml: raw XML request. Note this overrides all other options\n- resulttype: the resultType 'hits', 'results', 'validate' (default is 'results')", "id": "f4748:c0:m3"} {"signature": "def getrecordbyid(self, id=[], esn='', outputschema=namespaces[''], format=outputformat):", "body": "data = {'': self.service,'': self.version,'': '','': format,'': outputschema,'': esn,'': ''.join(id),}self.request = urlencode(data)self._invoke()if self.exceptionreport is None:self.results = {}self.records = OrderedDict()self._parserecords(outputschema, esn)", "docstring": "Construct and process a GetRecordById request\n\nParameters\n----------\n\n- id: the list of Ids\n- esn: the ElementSetName 'full', 'brief' or 'summary' (default is 'full')\n- outputschema: the outputSchema (default is 'http://www.opengis.net/cat/csw/2.0.2')\n- format: the outputFormat (default is 'application/xml')", "id": "f4748:c0:m4"} {"signature": "def get_operation_by_name(self, name):", "body": "for item in self.operations:if item.name.lower() == name.lower():return itemraise KeyError(\"\" % name)", "docstring": "Return a named operation", "id": "f4748:c0:m8"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)self.level = util.testXMLValue(md.find(util.nspath_eval('', namespaces)))val = md.find(util.nspath_eval('', namespaces))self.data_quality = _GenericObjectProperty(val)", "docstring": "constructor", "id": "f4750:c18:m0"} {"signature": "def __init__(self, md):", "body": "Core.__init__(self, md)", "docstring": "constructor", "id": "f4750:c50:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)self.number = util.testXMLValue(md.find(util.nspath_eval('', namespaces)))self.number_type = util.testXMLValue(md.find(util.nspath_eval('', namespaces)))val = md.find(util.nspath_eval('', namespaces))self.responsible_party = _GenericObjectProperty(val)", "docstring": "constructor", "id": "f4750:c17:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)val = md.find(util.nspath_eval('', namespaces))self.point_of_contact = _GenericObjectProperty(val)val = md.find(util.nspath_eval('', namespaces))self.identification = _GenericObjectProperty(val)val = md.find(util.nspath_eval('', namespaces))if val is not None:self.role = util.testXMLValue(val.find(util.nspath_eval('', namespaces)))", "docstring": "constructor", "id": "f4750:c47:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)val = md.find(util.nspath_eval('', namespaces))self.distribution = _GenericObjectProperty(val)", "docstring": "constructor", "id": "f4750:c12:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)val = 
md.find(util.nspath_eval('', namespaces))if val is not None:self.title = PT_FreeText(val)val = md.find(util.nspath_eval('', namespaces))self.authority = _GenericObjectProperty(val)", "docstring": "constructor", "id": "f4750:c31:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)self.extent_type_code = util.testXMLValue(md.find(util.nspath_eval('', namespaces)))boundaries = []for boundary in md.findall(util.nspath_eval('', namespaces)):polylines = []for polyline in boundary.findall(util.nspath_eval('', namespaces)):coords = []arcs = []for coord in polyline.findall(util.nspath_eval('', namespaces)):c1 = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))c2 = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))c3 = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))coordvalue = {'': c1, '': c2, '': c3}coords.append(coordvalue)for arc in polyline.findall(util.nspath_eval('', namespaces)):c1 = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))c2 = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))c3 = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))a1 = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))a2 = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))r = util.testXMLValue(coord.find(util.nspath_eval('', namespaces)))arcpoint = {'': c1, '': c2, '': c3, '': a1, '': a2, '': r}arcs.append(arcpoint)polylines.append(coords)polylines.append(arcs)boundaries.append(polylines)self.boundary = boundaries", "docstring": "constructor", "id": "f4750:c25:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)", "docstring": "constructor", "id": "f4750:c10:m0"} {"signature": "def __init__(self, md):", "body": "EX_TemporalExtent.__init__(self, md)", "docstring": "constructor", "id": "f4750:c36:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)self.minimum_value = util.testXMLValue(md.find(util.nspath_eval('', namespaces)))self.maximum_value = util.testXMLValue(md.find(util.nspath_eval('', namespaces)))self.unit_of_measure = util.testXMLValue(md.find(util.nspath_eval('', namespaces)))val = md.find(util.nspath_eval('', namespaces))self.vertical_datum = _GenericObjectProperty(val)", "docstring": "constructor", "id": "f4750:c19:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)val = md.find(util.nspath_eval('', namespaces))if val is not None:self.status = util.testXMLValue(val.find(util.nspath_eval('', namespaces)))val = md.find(util.nspath_eval('', namespaces))if val is not None:self.abstract = PT_FreeText(val)val = md.find(util.nspath_eval('', namespaces))if val is not None:self.purpose = PT_FreeText(val)val = md.find(util.nspath_eval('', namespaces))self.metadata = _GenericObjectProperty(val)val = md.find(util.nspath_eval('', namespaces))self.citation = _GenericObjectProperty(val)val = md.find(util.nspath_eval('', namespaces))if val is not None:self.spatial_representation_type = util.testXMLValue(val.find(util.nspath_eval('', namespaces)))val = md.find(util.nspath_eval('', namespaces))if val is not None:self.language = util.testXMLValue(val.find(util.nspath_eval('', namespaces)))val = md.find(util.nspath_eval('', namespaces))if val is not None:self.character_set = util.testXMLValue(val.find(util.nspath_eval('', namespaces)))val = md.find(util.nspath_eval('', namespaces))if val is not None:self.topic_category = util.testXMLValue(val.find(util.nspath_eval('', 
namespaces)))", "docstring": "constructor", "id": "f4750:c40:m0"} {"signature": "def __init__(self, md):", "body": "_GenericObject.__init__(self, md)self.hours_of_service = util.testXMLValue(md.find(util.nspath_eval('', namespaces)))val = md.find(util.nspath_eval('', namespaces))if val is not None:self.contact_instructions = PT_FreeText(val)", "docstring": "constructor", "id": "f4750:c32:m0"} {"signature": "def __init__(self, md):", "body": "self.tid = Noneif md is not None:self.tid = md.attrib.get('')", "docstring": "constructor", "id": "f4750:c0:m0"} {"signature": "def getfeatureinfo(**kw):", "body": "", "docstring": "Make a request to the WMS, returns data.", "id": "f4753:c6:m2"} {"signature": "def getcapabilities(**kw):", "body": "", "docstring": "Makes a GetCapabilities request to the remote WPS server,\nreturns an XML document wrapped in a python file-like object.", "id": "f4753:c10:m0"} {"signature": "def axml_context(d):", "body": "xml = etree.Element(\"\", nsmap=ns)etree.SubElement(xml, \"\").text = d['']spec_reference = [axml_link(do) for do inextract_p('', d, [])][xml.append(el) for el in spec_reference if el is not None]area_of_interest = extract_p('', d, None)if area_of_interest is not None:try:gml = etree.fromstring(area_of_interest)georss = etree.SubElement(xml, ns_elem(\"\", \"\"))georss.append(gml)except Exception as ex:log.warn('', ex)passcontext_metadata = [axml_link(do) for do inextract_p('', d, [])][xml.append(el) for el in context_metadata if el is not None]language = extract_p('', d, None)if language is not None: xml.set(ns_elem(\"\", \"\"), language)title = extract_p('', d, None)if title is not None: etree.SubElement(xml, \"\").text = titlesubtitle = extract_p('', d, None)if subtitle is not None: etree.SubElement(xml, \"\").text = subtitleupdate_date = extract_p('', d, None)if update_date is not None: etree.SubElement(xml, \"\").text = update_dateauthors = [axml_author(do) for do in extract_p('', d, [])][xml.append(el) for el in authors if el is not None]publisher = extract_p('', d, None)if publisher is not None: etree.SubElement(xml, ns_elem(\"\", \"\")).text = publishercreator_application = axml_creator_app(extract_p('', d, None))if creator_application is not None and not is_empty(creator_application): xml.append(creator_application)creator_display = axml_display(extract_p('', d, None))if creator_display is not None: xml.append(creator_display)rights = extract_p('', d, None)if rights is not None: etree.SubElement(xml, \"\").text = rightstime_interval_of_interest = extract_p('', d, None)if time_interval_of_interest is not None: etree.SubElement(xml,ns_elem(\"\", \"\")).text = time_interval_of_interestkeywords = [axml_category(do) for do inextract_p('', d, [])][xml.append(el) for el in keywords if el is not None]resources = [axml_resource(do) for do inextract_p('', d, [])][xml.append(el) for el in resources if el is not None]return xml", "docstring": "encodes base OwcContext as dict to atom xml tree\n:param d:\n:return:", "id": "f4754:m7"} {"signature": "def __init__(self,pixel_width,pixel_height=None,mm_per_pixel=None,):", "body": "self.pixel_width = pixel_widthself.pixel_height = pixel_heightself.mm_per_pixel = mm_per_pixel", "docstring": "constructor:\n\n:param pixel_width: Double\n:param pixel_height: Double\n:param mm_per_pixel: Double", "id": "f4757:c4:m0"} {"signature": "@classmethoddef from_atomxml(cls, xml_bytes):", "body": "d = decode_atomxml(xml_bytes)return cls.from_dict(d)", "docstring": "lets see if we can reuse the dict structure builder from 
geojson\n:param xmlstring:\n:return: OwcContext", "id": "f4757:c0:m7"} {"signature": "def __init__(self,name=None,email=None,uri=None):", "body": "self.name = nameself.email = emailself.uri = uri", "docstring": "constructor:\n\n:param name: String\n:param email: String (EmailAddress)\n:param uri: URL", "id": "f4757:c7:m0"} {"signature": "def __init__(self,offering_code,operations=[],contents=[],styles=[]):", "body": "self.offering_code = offering_codeself.operations = operationsself.contents = contentsself.styles = styles", "docstring": "constructor:\n\n:param offering_code: URL\n:param operations: List[OwcOperation]\n:param contents: List[OwcContent]\n:param styles: List[OwcStyleSet]", "id": "f4757:c8:m0"} {"signature": "def __init__(self,id,title,update_date,subtitle=None,authors=[],publisher=None,rights=None,geospatial_extent=None,temporal_extent=None,content_description=[],preview=[],content_by_ref=[],offerings=[],active=False,resource_metadata=[],keywords=[],min_scale_denominator=None,max_scale_denominator=None,folder=None):", "body": "self.id = idself.title = titleself.subtitle = subtitleself.update_date = update_dateself.authors = authorsself.publisher = publisherself.rights = rightsself.geospatial_extent = geospatial_extentself.temporal_extent = temporal_extentself.content_description = content_descriptionself.preview = previewself.content_by_ref = content_by_refself.offerings = offeringsself.active = activeself.resource_metadata = resource_metadataself.keywords = keywordsself.min_scale_denominator = min_scale_denominatorself.max_scale_denominator = max_scale_denominatorself.folder = folder", "docstring": "constructor:\n\n:param id: URL\n:param title: String\n:param update_date: datetime\n:param subtitle: String\n:param authors: List[OwcAuthor]\n:param publisher: String\n:param rights: String\n:param geospatial_extent: currently GeoJSON Polygon String\n:param temporal_extent: str\n:param content_description: OwcLink[] links.alternates, rel=alternate\n:param preview: OwcLink[] aka links.previews[] and rel=icon (atom)\n:param content_by_ref: OwcLink[], links.data, rel=enclosure (atom)\n:param offerings: OwcOffering[]\n:param active: Boolean\n:param resource_metadata: OwcLink[] aka links.via[] & rel=via\n:param keywords: OwcCategory[]\n:param min_scale_denominator: Double\n:param max_scale_denominator: Double\n:param folder: String", "id": "f4757:c1:m0"} {"signature": "def _readFromUrl(self, url, data, timeout, method='', username=None, password=None,headers=None, verify=True, cert=None):", "body": "if method == '':request_url = build_get_url(url, data, overwrite=True)log.debug(request_url)spliturl = request_url.split('')u = openURL(spliturl[], spliturl[], method='', username=username, password=password,headers=headers, verify=verify, cert=cert, timeout=self.timeout)return etree.fromstring(u.read())elif method == '':u = openURL(url, data, method='',username=username, password=password,headers=headers, verify=verify, cert=cert, timeout=timeout)return etree.fromstring(u.read())else:raise Exception(\"\" % method)", "docstring": "Method to get and parse a WPS document, returning an elementtree instance.\n:param str url: WPS service base url.\n:param str data: GET: dictionary of HTTP (key, value) parameter pairs, POST: XML document to post\n:param str username: optional user credentials\n:param str password: optional user credentials", "id": "f4759:c2:m1"} {"signature": "def _parseComplexData(self, element, complexDataElementName):", "body": "complex_data_element = 
element.find(complexDataElementName)if complex_data_element is not None:self.dataType = \"\"for supported_comlexdata_element incomplex_data_element.findall(''):self.supportedValues.append(ComplexData(mimeType=testXMLValue(supported_comlexdata_element.find('')),encoding=testXMLValue(supported_comlexdata_element.find('')),schema=testXMLValue(supported_comlexdata_element.find(''))))for format_element incomplex_data_element.findall(''):self.supportedValues.append(ComplexData(mimeType=testXMLValue(format_element.find('')),encoding=testXMLValue(format_element.find('')),schema=testXMLValue(format_element.find(''))))default_format_element = complex_data_element.find('')if default_format_element is not None:self.defaultValue = ComplexData(mimeType=testXMLValue(default_format_element.find('')),encoding=testXMLValue(default_format_element.find('')),schema=testXMLValue(default_format_element.find('')))", "docstring": "Method to parse a ComplexData or ComplexOutput element.", "id": "f4759:c8:m3"} {"signature": "def retrieveData(self, username=None, password=None, headers=None, verify=True, cert=None):", "body": "url = self.referenceif url is None:return \"\"log.info('' % url)if '' in url:spliturl = url.split('')u = openURL(spliturl[], spliturl[], method='', username=username, password=password,headers=headers, verify=verify, cert=cert)self.fileName = spliturl[].split('')[]else:u = openURL(url, '', method='', username=username, password=password,headers=headers, verify=verify, cert=cert)self.fileName = url.split('')[-]return u.read()", "docstring": "Method to retrieve data from server-side reference:\nreturns \"\" if the reference is not known.\n\n:param username: credentials to access the remote WPS server\n:param password: credentials to access the remote WPS server", "id": "f4759:c10:m1"} {"signature": "def __init__(self, url, version=WPS_DEFAULT_VERSION, username=None, password=None, verbose=False, skip_caps=False,headers=None, verify=True, cert=None, timeout=None):", "body": "self.url = clean_ows_url(url)self.username = usernameself.password = passwordself.version = versionself.verbose = verboseself.headers = headersself.verify = verifyself.cert = certself.timeout = timeoutself._capabilities = Noneself.identification = Noneself.provider = Noneself.operations = []self.processes = []if not skip_caps:self.getcapabilities()", "docstring": "Initialization method resets the object status.\nBy default it will execute a GetCapabilities invocation to the remote service,\nwhich can be skipped by using skip_caps=True.", "id": "f4759:c1:m0"} {"signature": "def _parseExceptionReport(self, root):", "body": "if self.status is None:self.status = \"\"for exceptionEl in root.findall(nspath('', ns=namespaces[''])):self.errors.append(WPSException(exceptionEl))", "docstring": "Method to parse a WPS ExceptionReport document and populate this object's metadata.", "id": "f4759:c6:m11"} {"signature": "def readFromUrl(self, url, username=None, password=None,headers=None, verify=True, cert=None):", "body": "return self._readFromUrl(url,{'': '', '':'', '': self.version},self.timeout,username=username, password=password,headers=headers, verify=verify, cert=cert)", "docstring": "Method to get and parse a WPS capabilities document, returning an elementtree instance.\n\n:param str url: WPS service base url, to which is appended the HTTP parameters: service, version, and request.\n:param str username: optional user credentials\n:param str password: optional user credentials", "id": "f4759:c3:m1"} {"signature": "def execute(self, 
identifier, inputs, output=None, mode=ASYNC, lineage=False, request=None, response=None):", "body": "log.info('')execution = WPSExecution(version=self.version, url=self.url,username=self.username, password=self.password, verbose=self.verbose,headers=self.headers, verify=self.verify, cert=self.cert, timeout=self.timeout)if request is None:requestElement = execution.buildRequest(identifier, inputs, output, mode=mode, lineage=lineage)request = etree.tostring(requestElement)execution.request = requestlog.debug(request)if response is None:response = execution.submitRequest(request)else:response = etree.fromstring(response)log.debug(etree.tostring(response))execution.parseResponse(response)return execution", "docstring": "Submits a WPS process execution request.\nReturns a WPSExecution object, which can be used to monitor the status of the job, and ultimately\nretrieve the result.\n\n:param str identifier: the requested process identifier\n:param inputs: list of process inputs as (input_identifier, value) tuples (where value is either a string\n for LiteralData, or an object for ComplexData).\n:param output: optional list of process outputs as tuples (output_identifier, as_ref, mime_type).\n `as_ref` can be True (as reference),\n False (embedded in response) or None (use service default).\n `mime_type` should be text or None (use service default)\n:param mode: execution mode: SYNC, ASYNC or AUTO. Default: ASYNC\n:param lineage: if lineage is \"true\", the Execute operation response shall include the DataInputs and\n OutputDefinitions elements.\n:param request: optional pre-built XML request document, prevents building of request from other arguments\n:param response: optional pre-built XML response document, prevents submission of request to live WPS server", "id": "f4759:c1:m3"} {"signature": "def readFromUrl(self, url, identifier, username=None, password=None,headers=None, verify=True, cert=None):", "body": "return self._readFromUrl(url,{'': '', '': '','': self.version, '': identifier},self.timeout,username=username, password=password,headers=headers, verify=verify, cert=cert)", "docstring": "Reads a WPS DescribeProcess document from a remote service and returns the XML etree object\n\n:param str url: WPS service base url, to which is appended the HTTP parameters: 'service', 'version',\n 'request', and 'identifier'.", "id": "f4759:c4:m1"} {"signature": "def _parseBoundingBoxData(self, element, bboxElementName):", "body": "bbox_data_element = element.find(bboxElementName)if bbox_data_element is not None:self.dataType = ''for bbox_element in bbox_data_element.findall(''):self.supportedValues.append(bbox_element.text)default_bbox_element = bbox_data_element.find('')if default_bbox_element is not None:self.defaultValue = default_bbox_element.text", "docstring": "Method to parse the BoundingBoxData element.", "id": "f4759:c8:m4"} {"signature": "def items(self, srs=None, profile=None):", "body": "items=[]if not srs and not profile:for item in self.contents:items.append((item,self.contents[item]))elif srs and profile:for item in self.contents:if (self.contents[item].srs == srs andself.contents[item].profile == profile):items.append((item,self.contents[item]))elif srs:for item in self.contents:if self.contents[item].srs == srs:items.append((item,self.contents[item]))elif profile:for item in self.contents:if self.contents[item].profile == profile:items.append((item,self.contents[item]))return items", "docstring": "supports dict-like items() access", "id": "f4760:c0:m4"} {"signature": "def 
__init__(self, url, version='', xml=None, username=None, password=None, parse_remote_metadata=False, timeout=):", "body": "self.url = urlself.username = usernameself.password = passwordself.version = versionself.timeout = timeoutself.services = Noneself._capabilities = Noneself.contents={}reader = TMSCapabilitiesReader(self.version, url=self.url, un=self.username, pw=self.password)if xml: self._capabilities = reader.readString(xml)else: self._capabilities = reader.read(self.url, timeout=self.timeout)self._buildMetadata(parse_remote_metadata)", "docstring": "Initialize.", "id": "f4760:c0:m0"} {"signature": "def WebCoverageService(url, version=None, xml=None, cookies=None, timeout=):", "body": "if version is None:if xml is None:reader = wcsBase.WCSCapabilitiesReader()request = reader.capabilities_url(url)xml = openURL(request, cookies=cookies, timeout=timeout).read()capabilities = etree.etree.fromstring(xml)version = capabilities.get('')del capabilitiesclean_url = clean_ows_url(url)if version == '':return wcs100.WebCoverageService_1_0_0.__new__(wcs100.WebCoverageService_1_0_0, clean_url, xml, cookies)elif version == '':return wcs110.WebCoverageService_1_1_0.__new__(wcs110.WebCoverageService_1_1_0, url, xml, cookies)elif version == '':return wcs111.WebCoverageService_1_1_1.__new__(wcs111.WebCoverageService_1_1_1, url, xml, cookies)elif version == '':return wcs200.WebCoverageService_2_0_0.__new__(wcs200.WebCoverageService_2_0_0, url, xml, cookies)elif version == '':return wcs201.WebCoverageService_2_0_1.__new__(wcs201.WebCoverageService_2_0_1, url, xml, cookies)", "docstring": "wcs factory function, returns a version specific WebCoverageService object", "id": "f4761:m0"} {"signature": "def items(self):", "body": "items = []for item in self.contents:items.append((item, self.contents[item]))return items", "docstring": "supports dict-like items() access", "id": "f4763:c0:m3"} {"signature": "def __getitem__(self, name):", "body": "if name in self.__getattribute__(''):return self.__getattribute__('')[name]else:raise KeyError(\"\" % name)", "docstring": "check contents dictionary to allow dict\n like access to service layers", "id": "f4763:c0:m0"} {"signature": "def read(self, service_url, timeout=):", "body": "self.request = self.capabilities_url(service_url)spliturl = self.request.split('')u = openURL(spliturl[], spliturl[], method='',username=self.username,password=self.password,timeout=timeout,headers=self.headers)raw_text = strip_bom(u.read())return etree.fromstring(raw_text)", "docstring": "Get and parse a WMS capabilities document, returning an\n elementtree instance\n\n service_url is the base url, to which is appended the service,\n version, and request parameters", "id": "f4764:c0:m2"} {"signature": "def readString(self, st):", "body": "if not isinstance(st, str) and not isinstance(st, bytes):raise ValueError(\"\" % type(st))raw_text = strip_bom(st)return etree.fromstring(raw_text)", "docstring": "Parse a WMS capabilities document, returning an elementtree instance.\n\n string should be an XML capabilities document", "id": "f4764:c0:m3"} {"signature": "def getmap(self, layers=None, styles=None, srs=None, bbox=None,format=None, size=None, time=None, transparent=False,bgcolor='',exceptions='',method='',timeout=None,**kwargs):", "body": "try:base_url = next((m.get('') for m in self.getOperationByName('').methods if m.get('').lower() == method.lower()))except StopIteration:base_url = self.urlrequest = {'': self.version, '': ''}request = 
self.__build_getmap_request(layers=layers,styles=styles,srs=srs,bbox=bbox,format=format,size=size,time=time,transparent=transparent,bgcolor=bgcolor,exceptions=exceptions,**kwargs)data = urlencode(request)self.request = bind_url(base_url) + datau = openURL(base_url, data, method, username=self.username, password=self.password, timeout=timeout or self.timeout)if u.info().get('', '').split('')[] in ['']:se_xml = u.read()se_tree = etree.fromstring(se_xml)err_message = six.text_type(se_tree.find('').text).strip()raise ServiceException(err_message)return u", "docstring": "Request and return an image from the WMS as a file-like object.\n\n Parameters\n ----------\n layers : list\n List of content layer names.\n styles : list\n Optional list of named styles, must be the same length as the\n layers list.\n srs : string\n A spatial reference system identifier.\n bbox : tuple\n (left, bottom, right, top) in srs units.\n format : string\n Output image format such as 'image/jpeg'.\n size : tuple\n (width, height) in pixels.\n transparent : bool\n Optional. Transparent background if True.\n bgcolor : string\n Optional. Image background color.\n method : string\n Optional. HTTP DCP method name: Get or Post.\n **kwargs : extra arguments\n anything else e.g. vendor specific parameters\n\n Example\n -------\n wms = WebMapService('http://giswebservices.massgis.state.ma.us/geoserver/wms', version='1.1.1')\n img = wms.getmap(layers=['massgis:GISDATA.SHORELINES_ARC'],\\\n styles=[''],\\\n srs='EPSG:4326',\\\n bbox=(-70.8, 42, -70, 42.8),\\\n size=(300, 300),\\\n format='image/jpeg',\\\n transparent=True)\n out = open('example.jpg', 'wb')\n bytes_written = out.write(img.read())\n out.close()", "id": "f4766:c1:m6"} {"signature": "def __getitem__(self, name):", "body": "if name in self.__getattribute__(''):return self.__getattribute__('')[name]else:raise KeyError(\"\" % name)", "docstring": "check contents dictionary to allow dict\n like access to service layers", "id": "f4766:c1:m0"} {"signature": "def __init__(self, url, version='', xml=None,username=None,password=None,parse_remote_metadata=False,headers=None,timeout=):", "body": "self.url = urlself.username = usernameself.password = passwordself.version = versionself.timeout = timeoutself.headers = headersself._capabilities = Nonereader = WMSCapabilitiesReader(self.version, url=self.url,un=self.username, pw=self.password,headers=headers)if xml: self._capabilities = reader.readString(xml)else: self._capabilities = reader.read(self.url, timeout=self.timeout)self.request = reader.requestse = self._capabilities.find('')if se is not None:err_message = str(se.text).strip()raise ServiceException(err_message)self._buildMetadata(parse_remote_metadata)", "docstring": "Initialize.", "id": "f4766:c1:m1"} {"signature": "def items(self):", "body": "items = []for item in self.contents:items.append((item, self.contents[item]))return items", "docstring": "supports dict-like items() access", "id": "f4766:c1:m3"} {"signature": "def getOperationByName(self, name):", "body": "for item in self.operations:if item.name == name:return itemraise KeyError(\"\" % name)", "docstring": "Return a named content item.", "id": "f4767:c2:m11"} {"signature": "def gettile(self, base_url=None, layer=None, style=None, format=None,tilematrixset=None, tilematrix=None, row=None, column=None,**kwargs):", "body": "vendor_kwargs = self.vendor_kwargs or {}vendor_kwargs.update(kwargs)if self.restonly:resurl = self.buildTileResource(layer, style, format, tilematrixset, tilematrix,row, column, 
**vendor_kwargs)u = openURL(resurl, username=self.username, password=self.password)return udata = self.buildTileRequest(layer, style, format, tilematrixset,tilematrix, row, column, **vendor_kwargs)if base_url is None:base_url = self.urltry:methods = self.getOperationByName('').methodsget_verbs = [x for x in methodsif x.get('').lower() == '']if len(get_verbs) > :base_url = next(x for x in filter(list,([pv.get('')for const in pv.get('')if '' in [x.lower() for x in const.values]]for pv in get_verbs if pv.get(''))))[]elif len(get_verbs) == :base_url = get_verbs[].get('')except StopIteration:passu = openURL(base_url, data, username=self.username,password=self.password)if u.info()[''] == '':se_xml = u.read()se_tree = etree.fromstring(se_xml)err_message = six.text_type(se_tree.find('').text)raise ServiceException(err_message.strip(), se_xml)return u", "docstring": "Return a tile from the WMTS.\n\n Returns the tile image as a file-like object.\n\n Parameters\n ----------\n base_url : string\n Optional URL for request submission. Defaults to the URL of\n the GetTile operation as declared in the GetCapabilities\n response.\n layer : string\n Content layer name.\n style : string\n Optional style name. Defaults to the first style defined for\n the relevant layer in the GetCapabilities response.\n format : string\n Optional output image format, such as 'image/jpeg'.\n Defaults to the first format defined for the relevant layer\n in the GetCapabilities response.\n tilematrixset : string\n Optional name of tile matrix set to use.\n Defaults to the first tile matrix set defined for the\n relevant layer in the GetCapabilities response.\n tilematrix : string\n Name of the tile matrix to use.\n row : integer\n Row index of tile to request.\n column : integer\n Column index of tile to request.\n **kwargs : extra arguments\n anything else e.g. 
vendor specific parameters\n\n Example\n -------\n >>> url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'\n >>> wmts = WebMapTileService(url)\n >>> img = wmts.gettile(layer='VIIRS_CityLights_2012',\\\n tilematrixset='EPSG4326_500m',\\\n tilematrix='6',\\\n row=4, column=4)\n >>> out = open('tile.jpg', 'wb')\n >>> bytes_written = out.write(img.read())\n >>> out.close()", "id": "f4767:c2:m8"} {"signature": "def _buildMetadata(self, parse_remote_metadata=False):", "body": "self.updateSequence = self._capabilities.attrib.get('')serviceident = self._capabilities.find(_SERVICE_IDENTIFICATION_TAG)self.identification = ServiceIdentification(serviceident)serviceprov = self._capabilities.find(_SERVICE_PROVIDER_TAG)if serviceprov is not None:self.provider = ServiceProvider(serviceprov)self.operations = []serviceop = self._capabilities.find(_OPERATIONS_METADATA_TAG)if serviceop is not None:for elem in serviceop[:]:self.operations.append(OperationsMetadata(elem))self.contents = {}caps = self._capabilities.find(_CONTENTS_TAG)def gather_layers(parent_elem, parent_metadata):for index, elem in enumerate(parent_elem.findall(_LAYER_TAG)):cm = ContentMetadata(elem, parent=parent_metadata, index=index + ,parse_remote_metadata=parse_remote_metadata)if cm.id:if cm.id in self.contents:raise KeyError('''' % cm.id)self.contents[cm.id] = cmgather_layers(elem, cm)gather_layers(caps, None)self.tilematrixsets = {}for elem in caps.findall(_TILE_MATRIX_SET_TAG):tms = TileMatrixSet(elem)if tms.identifier:if tms.identifier in self.tilematrixsets:raise KeyError('''' % tms.identifier)self.tilematrixsets[tms.identifier] = tmsself.themes = {}for elem in self._capabilities.findall(_THEMES_TAG + '' + _THEME_TAG):theme = Theme(elem)if theme.identifier:if theme.identifier in self.themes:raise KeyError(''% theme.identifier)self.themes[theme.identifier] = themeserviceMetadataURL = self._capabilities.find(_SERVICE_METADATA_URL_TAG)if serviceMetadataURL is not None:self.serviceMetadataURL = serviceMetadataURL.attrib[_HREF_TAG]else:self.serviceMetadataURL = None", "docstring": "set up capabilities metadata objects", "id": "f4767:c2:m3"} {"signature": "def read(self, service_url, vendor_kwargs=None):", "body": "getcaprequest = self.capabilities_url(service_url, vendor_kwargs)spliturl = getcaprequest.split('')u = openURL(spliturl[], spliturl[], method='',username=self.username, password=self.password)return etree.fromstring(u.read())", "docstring": "Get and parse a WMTS capabilities document, returning an\n elementtree instance\n\n service_url is the base url, to which is appended the service,\n version, and request parameters. 
Optional vendor-specific\n parameters can also be supplied as a dict.", "id": "f4767:c9:m2"} {"signature": "@staticmethoddef from_elements(link_elements):", "body": "links = []for link_element in link_elements:matrix_set_elements = link_element.findall(_TILE_MATRIX_SET_TAG)if len(matrix_set_elements) == :raise ValueError('' % link_element)elif len(matrix_set_elements) > :set_limits_elements = link_element.findall(_TILE_MATRIX_SET_LIMITS_TAG)if set_limits_elements:raise ValueError('''' %link_element)for matrix_set_element in matrix_set_elements:uri = matrix_set_element.text.strip()links.append(TileMatrixSetLink(uri))else:uri = matrix_set_elements[].text.strip()tilematrixlimits = {}path = '' % (_TILE_MATRIX_SET_LIMITS_TAG,_TILE_MATRIX_LIMITS_TAG)for limits_element in link_element.findall(path):tml = TileMatrixLimits(limits_element)if tml.tilematrix:if tml.tilematrix in tilematrixlimits:msg = ('''' % tml.tilematrix)raise KeyError(msg)tilematrixlimits[tml.tilematrix] = tmllinks.append(TileMatrixSetLink(uri, tilematrixlimits))return links", "docstring": "Return a list of TileMatrixSetLink instances derived from the\ngiven list of XML elements.", "id": "f4767:c7:m0"} {"signature": "def capabilities_url(self, service_url, vendor_kwargs=None):", "body": "pieces = urlparse(service_url)args = parse_qs(pieces.query)if '' not in args:args[''] = ''if '' not in args:args[''] = ''if '' not in args:args[''] = self.versionif vendor_kwargs:args.update(vendor_kwargs)query = urlencode(args, doseq=True)pieces = ParseResult(pieces.scheme, pieces.netloc,pieces.path, pieces.params,query, pieces.fragment)return urlunparse(pieces)", "docstring": "Return a capabilities url", "id": "f4767:c9:m1"} {"signature": "def setConstraintList(self, constraints, tostring=False):", "body": "ors = []if len(constraints) == :if isinstance(constraints[], OgcExpression):flt = self.setConstraint(constraints[])else:self._root.append(And(operations=constraints[]).toXML())flt = self._rootif tostring:return util.element_to_string(flt, xml_declaration=False)else:return fltfor c in constraints:if isinstance(c, OgcExpression):ors.append(c)elif isinstance(c, list) or isinstance(c, tuple):if len(c) == :ors.append(c[])elif len(c) >= :ands = []for sub in c:if isinstance(sub, OgcExpression):ands.append(sub)ors.append(And(operations=ands))self._root.append(Or(operations=ors).toXML())if tostring:return util.element_to_string(self._root, xml_declaration=False)return self._root", "docstring": "Construct and process a GetRecords request\n\nParameters\n----------\n\n- constraints: A list of OgcExpression objects\n The list is interpretted like so:\n\n [a,b,c]\n a || b || c\n\n [[a,b,c]]\n a && b && c\n\n [[a,b],[c],[d],[e]] or [[a,b],c,d,e]\n (a && b) || c || d || e\n- tostring (optional): return as string", "id": "f4768:c0:m3"} {"signature": "def WMCElement(tag):", "body": "return etree.Element(\"\"%context_ns_uri + tag)", "docstring": "WMC based element", "id": "f4774:m0"} {"signature": "def __new__(self,url, version, xml, parse_remote_metadata=False, timeout=,username=None, password=None):", "body": "obj=object.__new__(self)obj.__init__(url, version, xml, parse_remote_metadata, timeout,username=username, password=password)return obj", "docstring": "overridden __new__ method\n\n @type url: string\n @param url: url of WFS capabilities document\n @type xml: string\n @param xml: elementtree object\n @type parse_remote_metadata: boolean\n @param parse_remote_metadata: whether to fully process MetadataURL elements\n @param timeout: time (in seconds) after 
which requests should timeout\n @param username: service authentication username\n @param password: service authentication password\n @return: initialized WebFeatureService_1_1_0 object", "id": "f4776:c0:m0"} {"signature": "def __getitem__(self,name):", "body": "if name in self.__getattribute__('').keys():return self.__getattribute__('')[name]else:raise KeyError(\"\" % name)", "docstring": "check contents dictionary to allow dict like access to service layers", "id": "f4776:c0:m1"} {"signature": "def _buildMetadata(self, parse_remote_metadata=False):", "body": "self.updateSequence = self._capabilities.attrib.get('')val = self._capabilities.find(util.nspath_eval('', namespaces))if val is not None:self.identification=ServiceIdentification(val,self.owscommon.namespace)val = self._capabilities.find(util.nspath_eval('', namespaces))if val is not None:self.provider=ServiceProvider(val,self.owscommon.namespace)self.operations=[]for elem in self._capabilities.findall(util.nspath_eval('', namespaces)):self.operations.append(OperationsMetadata(elem, self.owscommon.namespace))self.constraints = {}for elem in self._capabilities.findall(util.nspath_eval('', namespaces)):self.constraints[elem.attrib['']] = Constraint(elem, self.owscommon.namespace)self.parameters = {}for elem in self._capabilities.findall(util.nspath_eval('', namespaces)):self.parameters[elem.attrib['']] = Parameter(elem, self.owscommon.namespace)val = self._capabilities.find(util.nspath_eval('', namespaces))self.filters=FilterCapabilities(val)self.contents={}features = self._capabilities.findall(nspath_eval('', namespaces))for feature in features:cm=ContentMetadata(feature, parse_remote_metadata)self.contents[cm.id]=cmself.exceptions = [f.text for fin self._capabilities.findall('')]", "docstring": "set up capabilities metadata objects:", "id": "f4776:c0:m3"} {"signature": "def _makeStringIO(self, strval):", "body": "if PY2:return StringIO(strval)return StringIO(strval.decode())", "docstring": "Helper method to make sure the StringIO being returned will work.\n\nDifferences between Python 2.7/3.x mean we have a lot of cases to handle.", "id": "f4778:c1:m6"} {"signature": "def collections(self):", "body": "url = self._build_url('')LOGGER.debug(''.format(url))response = requests.get(url, headers=REQUEST_HEADERS).json()return response['']", "docstring": "implements Requirement 9 (/req/core/collections-op)\n\n@returns: collections object", "id": "f4779:c0:m2"} {"signature": "def collection_item(self, collection_name, identifier):", "body": "path = ''.format(collection_name, identifier)url = self._build_url(path)LOGGER.debug(''.format(url))response = requests.get(url, headers=REQUEST_HEADERS).json()return response", "docstring": "implements Requirement 30 (/req/core/f-op)\n\n@type collection_name: string\n@param collection_name: name of collection\n@type identifier: string\n@param identifier: feature identifier\n\n@returns: single feature result", "id": "f4779:c0:m5"} {"signature": "def _makeStringIO(self, strval):", "body": "if PY2:return StringIO(strval)return StringIO(strval.decode())", "docstring": "Helper method to make sure the StringIO being returned will work.\n\nDifferences between Python 2.7/3.x mean we have a lot of cases to handle.", "id": "f4780:c0:m6"} {"signature": "def nspath(path, ns=WFS_NAMESPACE):", "body": "components = []for component in path.split(\"\"):if component != '':component = \"\" % (ns, component)components.append(component)return \"\".join(components)", "docstring": "Prefix the given path with the given 
namespace identifier.\n\nParameters\n----------\npath : string\n ElementTree API Compatible path expression\n\nns : string\n The XML namespace. Defaults to WFS namespace.", "id": "f4780:m0"} {"signature": "def readString(self, st):", "body": "if not isinstance(st, str) and not isinstance(st, bytes):raise ValueError(\"\" % type(st))return etree.fromstring(st)", "docstring": "Parse a WFS capabilities document, returning an\n instance of WFSCapabilitiesInfoset\n\n string should be an XML capabilities document", "id": "f4781:c0:m3"} {"signature": "def __init__(self, version='', username=None, password=None):", "body": "self.version = versionself.username = usernameself.password = passwordself._infoset = None", "docstring": "Initialize", "id": "f4781:c0:m0"} {"signature": "def getSRS(self, srsname, typename):", "body": "if not isinstance(srsname, Crs):srs = Crs(srsname)else:srs = srsnametry:index = self.contents[typename].crsOptions.index(srs)return self.contents[typename].crsOptions[index]except ValueError:options = \"\".join(map(lambda crs: crs.id,self.contents[typename].crsOptions))log.warning(\"\"\"\"\"\", srs.getcode(), typename, options)return None", "docstring": "Returns None or Crs object for given name\n\n @param typename: feature name \n @type typename: String", "id": "f4782:c0:m1"} {"signature": "def getGETGetFeatureRequest(self, typename=None, filter=None, bbox=None, featureid=None,featureversion=None, propertyname=None, maxfeatures=None,storedQueryID=None, storedQueryParams=None,outputFormat=None, method='', startindex=None, sortby=None):", "body": "storedQueryParams = storedQueryParams or {}base_url = next((m.get('') for m in self.getOperationByName('').methods if m.get('').lower() == method.lower()))base_url = base_url if base_url.endswith(\"\") else base_url+\"\"request = {'': '', '': self.version, '': ''}if featureid:request[''] = ''.join(featureid)elif bbox:request[''] = self.getBBOXKVP(bbox,typename)elif filter:request[''] = str(filter)if typename:typename = [typename] if type(typename) == type(\"\") else typenameif int(self.version.split('')[]) >= :request[''] = ''.join(typename)else:request[''] = ''.join(typename)if propertyname: request[''] = ''.join(propertyname)if sortby:request[''] = ''.join(sortby)if featureversion: request[''] = str(featureversion)if maxfeatures: if int(self.version.split('')[]) >= :request[''] = str(maxfeatures)else:request[''] = str(maxfeatures)if startindex:request[''] = str(startindex)if storedQueryID: request['']=str(storedQueryID)for param in storedQueryParams:request[param]=storedQueryParams[param]if outputFormat is not None:request[\"\"] = outputFormatdata = urlencode(request, doseq=True)return base_url+data", "docstring": "Formulate proper GetFeature request using KVP encoding\n ----------\n typename : list\n List of typenames (string)\n filter : string \n XML-encoded OGC filter expression.\n bbox : tuple\n (left, bottom, right, top) in the feature type's coordinates == (minx, miny, maxx, maxy)\n featureid : list\n List of unique feature ids (string)\n featureversion : string\n Default is most recent feature version.\n propertyname : list\n List of feature property names. 
'*' matches all.\n maxfeatures : int\n Maximum number of features to be returned.\n method : string\n Qualified name of the HTTP DCP method to use.\n outputFormat: string (optional)\n Requested response format of the request.\n startindex: int (optional)\n Start position to return feature set (paging in combination with maxfeatures)\n sortby: list (optional)\n List of property names whose values should be used to order\n (upon presentation) the set of feature instances that\n satisfy the query.\n\n There are 3 different modes of use:\n\n 1) typename and bbox (simple spatial query)\n 2) typename and filter (==query) (more expressive)\n 3) featureid (direct access to known features)", "id": "f4782:c0:m2"} {"signature": "def __init__(self, u):", "body": "self.u=uself._getType()", "docstring": "Initialize with a urllib url object.", "id": "f4785:c0:m0"} {"signature": "def descCov_url(self, service_url):", "body": "qs = []if service_url.find('') != -:qs = cgi.parse_qsl(service_url.split('')[])params = [x[] for x in qs]if '' not in params:qs.append(('', ''))if '' not in params:qs.append(('', ''))if '' not in params:qs.append(('', self.version))if self.version == '':if '' not in params:qs.append(('', self.identifier))elif self.version == '':if '' not in params:qs.append(('', self.identifier))elif self.version == '':if '' not in params:qs.append(('', self.identifier))elif self.version == '' or self.version == '':if '' not in params:qs.append(('', self.identifier))if '' not in params:qs.append(('', self.identifier))qs.append(('', ''))urlqs = urlencode(tuple(qs))return service_url.split('')[] + '' + urlqs", "docstring": "Return a describe coverage url\n @type service_url: string\n @param service_url: base url of WCS service\n @rtype: string\n @return: describe coverage URL", "id": "f4787:c3:m1"} {"signature": "def read(self, service_url, timeout=):", "body": "request = self.descCov_url(service_url)u = openURL(request, cookies=self.cookies, timeout=timeout)return etree.fromstring(u.read())", "docstring": "Get and parse a Describe Coverage document, returning an\n elementtree tree\n\n @type service_url: string\n @param service_url: The base url, to which is appended the service,\n version, and request parameters\n @rtype: elementtree tree\n @return: An elementtree tree representation of the capabilities document", "id": "f4787:c3:m2"} {"signature": "def __new__(self,url, xml, cookies):", "body": "obj=object.__new__(self)obj.__init__(url, xml, cookies)self.cookies=cookiesself._describeCoverage = {} return obj", "docstring": "overridden __new__ method \n\n @type url: string\n @param url: url of WCS capabilities document\n @type xml: string\n @param xml: elementtree object\n @return: initialised WCSBase object", "id": "f4787:c1:m0"} {"signature": "def __init__(self, version=None, cookies = None):", "body": "self.version = versionself._infoset = Noneself.cookies = cookies", "docstring": "Initialize\n @type version: string\n @param version: WCS Version parameter e.g '1.0.0'", "id": "f4787:c2:m0"} {"signature": "def items(self):", "body": "items=[]for item in self.contents:items.append((item,self.contents[item]))return items", "docstring": "supports dict-like items() access", "id": "f4788:c1:m2"} {"signature": "def _checkChildAndParent(self, path):", "body": "try:value = self._elem.find(path).textexcept:try:value = self._parent.find(path).textexcept:value = Nonereturn value", "docstring": "Checks the child coverage summary and, if the item is not found, checks the higher-level coverage summary.", "id": "f4788:c6:m4"} 
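The getGETGetFeatureRequest entry above (f4782:c0:m2) lists three modes for building a WFS GetFeature request: typename with bbox, typename with filter, and featureid. The sketch below shows how those modes look through OWSLib's public WebFeatureService wrapper; it is an illustrative usage example rather than code from this corpus, and the service URL and layer name are hypothetical placeholders.

    from owslib.wfs import WebFeatureService

    # Hypothetical endpoint and layer name; substitute a real WFS service.
    wfs = WebFeatureService('https://example.com/geoserver/wfs', version='1.1.0')

    # 1) typename and bbox (simple spatial query)
    resp = wfs.getfeature(typename='ns:roads', bbox=(-70.8, 42.0, -70.0, 42.8))

    # 3) featureid (direct access to known features); mode 2 instead passes an
    # XML-encoded OGC filter string via the filter= keyword.
    resp = wfs.getfeature(featureid=['ns:roads.1001'])

    # The response is a file-like object (StringIO), as suggested by the
    # _makeStringIO helper above.
    with open('out.gml', 'w') as out:
        out.write(resp.read())
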
{"signature": "def is_number(self,s):", "body": "try:float(s)return Trueexcept ValueError:return False", "docstring": "simple helper to test if value is number as requests with numbers dont\n need quote marks", "id": "f4789:c0:m5"} {"signature": "def __init__(self, elem, service):", "body": "self._elem=elemself._service=serviceself.id=elem.find(nsWCS2('')).textself.title = testXMLValue(elem.find(ns('')))self.abstract= testXMLValue(elem.find(ns('')))self.keywords = [f.text for f in elem.findall(ns('')+''+ns(''))]self.boundingBox=None self.boundingBoxWGS84 = Noneb = elem.find(ns(''))if b is not None:gmlpositions=b.findall('')lc=gmlpositions[].textuc=gmlpositions[].textself.boundingBoxWGS84 = (float(lc.split()[]),float(lc.split()[]),float(uc.split()[]), float(uc.split()[]),)self.styles=Noneself.crsOptions=Noneself.defaulttimeposition=None", "docstring": "Initialize. service is required so that describeCoverage requests may be made", "id": "f4789:c1:m0"} {"signature": "def items(self):", "body": "items=[]for item in self.contents:items.append((item,self.contents[item]))return items", "docstring": "supports dict-like items() access", "id": "f4789:c0:m2"} {"signature": "def getCoverage(self, identifier=None, bbox=None, time=None, format = None, subsets=None,crs=None, width=None, height=None, resx=None, resy=None, resz=None,parameter=None,method='',**kwargs):", "body": "if log.isEnabledFor(logging.DEBUG):log.debug(''%(identifier, bbox, time, format, crs, width, height, resx, resy, resz, parameter, method, str(kwargs)))try:base_url = next((m.get('') for m in self.getOperationByName('').methods if m.get('').lower() == method.lower()))except StopIteration:base_url = self.urlif log.isEnabledFor(logging.DEBUG):log.debug(''%base_url)request = {'': self.version, '': '', '':''}assert len(identifier) > request['']=identifier[]if crs:request['']=crsrequest['']=formatif width:request['']=widthif height:request['']=heightif kwargs:for kw in kwargs:request[kw]=kwargs[kw]data = urlencode(request)if subsets:for subset in subsets:if len(subset) > :if not self.is_number(subset[]):data = data + \"\"+ urlencode({\"\":subset[]+''+self.__makeString(subset[])+''+self.__makeString(subset[])+''})else:data = data + \"\"+ urlencode({\"\":subset[]+''+self.__makeString(subset[])+''+self.__makeString(subset[])+''})else:if not self.is_number(subset[]):data = data + \"\"+ urlencode({\"\":subset[]+''+self.__makeString(subset[])+''})else:data = data + \"\"+ urlencode({\"\":subset[]+''+self.__makeString(subset[])+''})if log.isEnabledFor(logging.DEBUG):log.debug(''%data)u=openURL(base_url, data, method, self.cookies)return u", "docstring": "Request and return a coverage from the WCS as a file-like object\n note: additional **kwargs helps with multi-version implementation\n core keyword arguments should be supported cross version\n example:\n cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),format='cf-netcdf')\n\n is equivalent to:\n http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf\n\n example 2.0.1 URL\n http://earthserver.pml.ac.uk/rasdaman/ows?&SERVICE=WCS&VERSION=2.0.1&REQUEST=GetCoverage\n &COVERAGEID=V2_monthly_CCI_chlor_a_insitu_test&SUBSET=Lat(40,50)&SUBSET=Long(-10,0)&SUBSET=ansi(144883,145000)&FORMAT=application/netcdf\n\n cvg=wcs.getCoverage(identifier=['myID'], format='application/netcdf', 
subsets=[('axisName',min,max),('axisName',min,max),('axisName',min,max)])", "id": "f4789:c0:m4"} {"signature": "def getCoverage(self, identifier=None, bbox=None, time=None, format = None, crs=None, width=None, height=None, resx=None, resy=None, resz=None,parameter=None,method='',**kwargs):", "body": "if log.isEnabledFor(logging.DEBUG):log.debug(''%(identifier, bbox, time, format, crs, width, height, resx, resy, resz, parameter, method, str(kwargs)))try:base_url = next((m.get('') for m in self.getOperationByName('').methods if m.get('').lower() == method.lower()))except StopIteration:base_url = self.urlif log.isEnabledFor(logging.DEBUG):log.debug(''%base_url)request = {'': self.version, '': '', '':''}assert len(identifier) > request['']=identifierif bbox:request['']=''.join([self.__makeString(x) for x in bbox])else:request['']=Noneif time:request['']=''.join(time)if crs:request['']=crsrequest['']=formatif width:request['']=widthif height:request['']=heightif resx:request['']=resxif resy:request['']=resyif resz:request['']=reszif kwargs:for kw in kwargs:request[kw]=kwargs[kw]data = urlencode(request)if log.isEnabledFor(logging.DEBUG):log.debug(''%data)u=openURL(base_url, data, method, self.cookies)return u", "docstring": "Request and return a coverage from the WCS as a file-like object\n note: additional **kwargs helps with multi-version implementation\n core keyword arguments should be supported cross version\n example:\n cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),format='cf-netcdf')\n\n is equivalent to:\n http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf", "id": "f4790:c0:m4"} {"signature": "def __init__(self, elem, service):", "body": "self._elem=elemself._service=serviceself.id=elem.find(ns('')).textself.title = testXMLValue(elem.find(ns('')))self.abstract= testXMLValue(elem.find(ns('')))self.keywords = [f.text for f in elem.findall(ns('')+''+ns(''))] self.boundingBox=None self.boundingBoxWGS84 = None b = elem.find(ns('')) if b is not None:gmlpositions=b.findall('')lc=gmlpositions[].textuc=gmlpositions[].textself.boundingBoxWGS84 = (float(lc.split()[]),float(lc.split()[]),float(uc.split()[]), float(uc.split()[]),)self.styles=Noneself.crsOptions=Noneself.defaulttimeposition=None", "docstring": "Initialize. 
service is required so that describeCoverage requests may be made", "id": "f4790:c5:m0"} {"signature": "def __getitem__(self,name):", "body": "if name in self.__getattribute__('').keys():return self.__getattribute__('')[name]else:raise KeyError(\"\" % name)", "docstring": "check contents dictionary to allow dict like access to service layers", "id": "f4790:c0:m0"} {"signature": "def __init__(self, elem):", "body": "self.name = elem.tag.split('')[] self.methods = []for resource in elem.findall(ns('')+ns('')+ns('')+ns('')):url = resource.attrib['']self.methods.append({'': '', '': url})for resource in elem.findall(ns('')+ns('')+ns('')+ns('')):url = resource.attrib['']self.methods.append({'': '', '': url})", "docstring": ".", "id": "f4790:c1:m0"} {"signature": "def read_string(self, st):", "body": "if not isinstance(st, bytes):raise ValueError(\"\" % type(st))return etree.fromstring(st)", "docstring": "Parse a SOS capabilities document, returning an elementtree instance\n\nst should be an XML capabilities document", "id": "f4791:c2:m3"} {"signature": "def capabilities_url(self, service_url):", "body": "qs = []if service_url.find('') != -:qs = cgi.parse_qsl(service_url.split('')[])params = [x[] for x in qs]if '' not in params:qs.append(('', ''))if '' not in params:qs.append(('', ''))if '' not in params:qs.append(('', self.version))urlqs = urlencode(tuple(qs))return service_url.split('')[] + '' + urlqs", "docstring": "Return a capabilities url", "id": "f4792:c2:m1"} {"signature": "def _parse_metadata(self, element):", "body": "pass", "docstring": "Parse metadata elements relating to timeseries:\n TS: baseTime, spacing, commentBlock, parameter\n MTS: startAnchor, endAnchor, cumulative, accAnchor/Length, maxGap", "id": "f4794:c2:m3"} {"signature": "def _parse_result(self):", "body": "if self.result is not None:result = self.result.find(nspv(\"\"))self.result = MeasurementTimeseries(result)", "docstring": "Parse the result element of the observation type", "id": "f4794:c0:m1"} {"signature": "def __init__(self, element):", "body": "self.quality = testXMLAttribute(element.find(nspv(\"\")), nspv(\"\"))self.nilReason = testXMLAttribute(element.find(nspv(\"\")), nspv(\"\"))self.comment = testXMLValue(element.find(nspv(\"\")))self.qualifier = testXMLAttribute(element.find(nspv(\"\")), nspv(\"\"))self.processing = testXMLValue(element.find(nspv(\"\")), nspv(\"\"))self.source = testXMLValue(element.find(nspv(\"\")), nspv(\"\"))", "docstring": "Base time-value pair metadata. 
Still to do:\n - relatedObservation", "id": "f4794:c4:m0"} {"signature": "def complex_input_with_content():", "body": "print(\"\")wps = WebProcessingService('', verbose=verbose)processid = ''textdoc = ComplexDataInput(\"\") inputs = [(\"\", textdoc)]outputs = [(\"\",True,'')]execution = wps.execute(processid, inputs, output=outputs)monitorExecution(execution)print('', execution.percentCompleted)print('', execution.statusMessage)for output in execution.processOutputs:print('' % (output.identifier, output.dataType, output.data, output.reference))", "docstring": "use ComplexDataInput with a direct content", "id": "f4801:m2"} {"signature": "def reload(self):", "body": "self.settings = self.read()", "docstring": "Re-open and read settings from file.", "id": "f4828:c0:m9"} {"signature": "def unset_value(self, key):", "body": "if key.endswith(\"\"):key = key[:-]path = key.split(\"\")curr = self.settingsfor p in path[:-]:if p not in curr:raise ConfigurationError(\"\".format(key, p))curr = curr[p]if not isinstance(curr, dict):raise ConfigurationError(\"\".format(path[-], key))if path[-] not in curr:raise ConfigurationError(\"\".format(key, path[-]))del curr[path[-]]", "docstring": "Remove a value at the given key -- and any nested values --\nfrom the configuration.\n*Note*: In order to write changes to the file, ensure that\n:meth:`~giraffez.config.Config.write` is called prior to exit.\n\n:param str key: A path to the value destination, with nested levels joined by '.'\n:raises `giraffez.errors.ConfigurationError`: if the key specifies an invalid path, or does not exist", "id": "f4828:c0:m12"} {"signature": "def create_key_file(path):", "body": "iv = \"\".format(os.urandom(), time.time())new_key = generate_key(ensure_bytes(iv))with open(path, \"\") as f:f.write(base64.b64encode(new_key))os.chmod(path, )", "docstring": "Creates a new encryption key in the path provided and sets the file\npermissions. Setting the file permissions currently does not work\non Windows platforms because of the differences in how file\npermissions are read and modified.", "id": "f4834:m0"} {"signature": "def to_archive(self, writer):", "body": "if '' not in writer.mode:raise GiraffeError(\"\")writer.write(GIRAFFE_MAGIC)writer.write(self.columns.serialize())i = for n, chunk in enumerate(self._fetchall(ROW_ENCODING_RAW), ):writer.write(chunk)yield TeradataEncoder.count(chunk)", "docstring": "Writes export archive files in the Giraffez archive format.\nThis takes a `giraffez.io.Writer` and writes archive chunks to\nfile until all rows for a given statement have been exhausted.\n\n.. 
code-block:: python\n\n with giraffez.BulkExport(\"database.table_name\") as export:\n with giraffez.Writer(\"database.table_name.tar.gz\", 'wb', use_gzip=True) as out:\n for n in export.to_archive(out):\n print(\"Rows: {}\".format(n))\n\n:param `giraffez.io.Writer` writer: A writer handling the archive output\n\n:rtype: iterator (yields ``int``)", "id": "f4837:c0:m4"} {"signature": "def to_dict(self):", "body": "return self._fetchall(ROW_ENCODING_DICT)", "docstring": "Sets the current encoder output to Python `dict` and returns\na row iterator.\n\n:rtype: iterator (yields ``dict``)", "id": "f4837:c0:m5"} {"signature": "def do_horse(self, line):", "body": "log.write(ascii_giraffe)", "docstring": "It totally prints a horse.", "id": "f4839:c1:m6"} {"signature": "@classmethoddef deserialize(cls, data):", "body": "column_list = cls()while data:tup, data = data[:], data[:]column_type, length, prec, scale, title_len = struct.unpack(\"\", tup)title, data = data[:title_len], data[title_len:]try:column_list.append((title, column_type, length, prec, scale))except GiraffeTypeError as error:raise GiraffeEncodeError(error)return column_list", "docstring": "Deserializes giraffez Archive header. See\n:meth:`~giraffez.types.Columns.serialize` for more information.\n\n:param str data: data in giraffez Archive format, to be deserialized\n:return: :class:`~giraffez.types.Columns` object decoded from data", "id": "f4840:c1:m15"} {"signature": "def set_filter(self, names=None):", "body": "_names = []if names:for name in names:_safe_name = safe_name(name)if _safe_name not in self._column_map:raise GiraffeTypeError(\"\".format(name))if _safe_name in _names:continue_names.append(_safe_name)self._filtered_columns = _names", "docstring": "Set the names of columns to be used when iterating through the list,\nretrieving names, etc.\n\n:param list names: A list of names to be used, or :code:`None` for all", "id": "f4840:c1:m6"} {"signature": "def readall(self):", "body": "n = for row in self:n += return n", "docstring": "Exhausts the current connection by iterating over all rows and\nreturning the total.\n\n.. code-block:: python\n\n with giraffez.Cmd() as cmd:\n results = cmd.execute(\"select * from dbc.dbcinfo\")\n print(results.readall())", "id": "f4845:c0:m4"} {"signature": "def register_graceful_shutdown_signal():", "body": "from ._teradata import register_graceful_shutdown_signal as _register_register()", "docstring": "Registers graceful shutdown handler using C signals. The first\nSIGINT will attempt to close the Teradata connections before\nexiting, and the second will shutdown whether the connections\nhave been closed or not.", "id": "f4846:m8"} {"signature": "def checkpoint(self):", "body": "return self.mload.checkpoint()", "docstring": "Execute a checkpoint while loading rows. Called automatically\nwhen loading from a file. Updates the exit code of the driver to\nreflect errors.", "id": "f4848:c0:m1"} {"signature": "@propertydef columns(self):", "body": "return self._columns", "docstring": "The list of columns in use.\n\n:getter: Return the list of columns in use.\n:setter: Set the columns to be loaded into, as well as their order. If\n loading from a file, these will be determined from the file header.\n Not necessary if you are loading into all columns, in the original\n order. 
The value must be a :code:`list` of names in the order that\n the fields of data will be presented in each row.\n\n Raises :class:`~giraffez.errors.GiraffeError` if :code:`field_names`\n is not a :code:`list`.\n\n Raises :class:`~giraffez.errors.GiraffeError` if the target table\n has not been set.\n:type: :class:`~giraffez.types.Columns`", "id": "f4848:c0:m3"} {"signature": "def put(self, items, panic=True):", "body": "if not self.initiated:self._initiate()try:row_status = self.mload.put_row(self.preprocessor(items))self.applied_count += except (TeradataPTError, EncoderError) as error:self.error_count += if panic:raise errorlog.info(\"\", error)", "docstring": "Load a single row into the target table.\n\n:param list items: A list of values in the row corresponding to the\n fields specified by :code:`self.columns`\n:param bool panic: If :code:`True`, when an error is encountered it will be\n raised. Otherwise, the error will be logged and :code:`self.error_count`\n is incremented.\n:raises `giraffez.errors.GiraffeEncodeError`: if :code:`panic` is :code:`True` and there\n are format errors in the row values.\n:raises `giraffez.errors.GiraffeError`: if table name is not set.\n:raises `giraffez.TeradataPTError`: if there is a problem\n connecting to Teradata.", "id": "f4848:c0:m7"} {"signature": "def find_teradata_home():", "body": "if platform.system() == '':if is_64bit():return latest_teradata_version(\"\")else:return latest_teradata_version(\"\")elif platform.system() == '':return latest_teradata_version(\"\")elif platform.system() == '':return latest_teradata_version(\"\")else:return latest_teradata_version(\"\")", "docstring": "Attempts to find the Teradata install directory with the defaults\nfor a given platform. Should always return `None` when the defaults\nare not present and the TERADATA_HOME environment variable wasn't\nexplicitly set to the correct install location.", "id": "f4851:m2"} {"signature": "def fix_compile(remove_flags):", "body": "import distutils.ccompilerdef _fix_compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=,extra_preargs=None, extra_postargs=None, depends=None):for flag in remove_flags:if flag in self.compiler_so:self.compiler_so.remove(flag)macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros,include_dirs, sources, depends, extra_postargs)cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)for obj in objects:try:src, ext = build[obj]except KeyError:continueself._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)return objectsdistutils.ccompiler.CCompiler.compile = _fix_compile", "docstring": "Monkey-patch compiler to allow for removal of default compiler flags.", "id": "f4851:m0"} {"signature": "def go(): ", "body": "searcher = Searcher(source=\"\")searcher.go()", "docstring": "Default scenario", "id": "f4857:m0"} {"signature": "def execute_get_text(command): ", "body": "try:completed = subprocess.run(command,check=True,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)except subprocess.CalledProcessError as err:raiseelse:print(completed.stdout.decode('') + str(\"\") + completed.stderr.decode(\"\"))return completed.stdout.decode('') + completed.stderr.decode(\"\")", "docstring": "Execute shell command and return stdout txt\n:param command:\n:return:", "id": "f4862:m5"} {"signature": "def connection_remote_closed(self, connection, pn_condition):", "body": "assert len(self.receivers) == self.connection.close()", "docstring": "All senders have finished and closed, test over.", 
"id": "f4865:c2:m2"} {"signature": "def setup(self):", "body": "super(CyrusTest, self).setup()if not CyrusTest.CONF_DIR:raise common.Skipped(\"\")self.container1 = pyngus.Container(\"\")self.container2 = pyngus.Container(\"\")", "docstring": "Create a simple SASL configuration. This assumes saslpasswd2 is in\n the OS path, otherwise the test will be skipped.", "id": "f4866:c1:m0"} {"signature": "def _setup_receiver_sync(self):", "body": "rl_handler = common.ReceiverCallback()receiver = self.conn2.create_receiver(\"\", \"\", rl_handler)receiver.user_context = rl_handlerreceiver.open()self.process_connections()assert self.conn1_handler.sender_requested_ct == args = self.conn1_handler.sender_requested_args[]sl_handler = common.SenderCallback()sender = self.conn1.accept_sender(args.link_handle,event_handler=sl_handler)sender.user_context = sl_handlersender.open()self.process_connections()assert sender.active and sl_handler.active_ct > assert receiver.active and rl_handler.active_ct > return (sender, receiver)", "docstring": "Create links, initiated by receiver.", "id": "f4867:c0:m4"} {"signature": "def send_output(self):", "body": "try:pyngus.write_socket_output(self.connection,self.socket)except Exception as e:LOG.error(\"\", str(e))self.connection.close_output()self.connection.close()self.connection.process(time.time())", "docstring": "Called when socket is write-ready", "id": "f4871:c0:m4"} {"signature": "def send_output(self):", "body": "try:pyngus.write_socket_output(self.connection,self.socket)except Exception as e:LOG.error(\"\", str(e))self.connection.close_output()self.connection.close()self.connection.process(time.time())", "docstring": "Called when socket is write-ready", "id": "f4872:c0:m5"} {"signature": "def process_input(self):", "body": "try:pyngus.read_socket_input(self.connection, self.socket)except Exception as e:LOG.error(\"\", str(e))self.connection.close_input()self.connection.close()self.connection.process(time.time())", "docstring": "Called when socket is read-ready", "id": "f4872:c0:m4"} {"signature": "def __init__(self, container, socket_, name, properties):", "body": "self.socket = socket_self.connection = container.create_connection(name,self, properties)self.connection.user_context = selfself.connection.open()self.sender_links = set()self.receiver_links = set()", "docstring": "Create a Connection using socket_.", "id": "f4872:c0:m0"} {"signature": "def server_socket(host, port, backlog=):", "body": "addr = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)if not addr:raise Exception(\"\"% (host, str(port)))my_socket = socket.socket(addr[][], addr[][], addr[][])my_socket.setblocking() try:my_socket.bind(addr[][])my_socket.listen(backlog)except socket.error as e:if e.errno != errno.EINPROGRESS:raisereturn my_socket", "docstring": "Create a TCP listening socket for a server.", "id": "f4873:m2"} {"signature": "def connect_socket(host, port, blocking=True):", "body": "addr = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)if not addr:raise Exception(\"\"% (host, str(port)))my_socket = socket.socket(addr[][], addr[][], addr[][])if not blocking:my_socket.setblocking()try:my_socket.connect(addr[][])except socket.error as e:if e.errno != errno.EINPROGRESS:raisereturn my_socket", "docstring": "Create a TCP connection to the server.", "id": "f4873:m1"} {"signature": "def sender_failed(self, sender_link, error):", "body": "LOG.debug(\"\", error)sender_link.close()", "docstring": "Protocol error occurred.", "id": "f4874:c1:m5"} {"signature": 
"def connection_remote_closed(self, connection, pn_condition):", "body": "LOG.debug(\"\", pn_condition)connection.close()", "docstring": "Peer has closed its end of the connection.", "id": "f4876:c0:m1"} {"signature": "def sender_failed(self, sender_link, error):", "body": "LOG.debug(\"\", error)sender_link.close()", "docstring": "Protocol error occurred.", "id": "f4876:c1:m1"} {"signature": "def _ep_error(self, error):", "body": "LOG.error(\"\",self._name, error)", "docstring": "Unanticipated/illegal state change.", "id": "f4878:c0:m7"} {"signature": "def _ep_need_close(self):", "body": "LOG.debug(\"\")", "docstring": "The remote has closed its end of the endpoint.", "id": "f4878:c0:m5"} {"signature": "@propertydef _endpoint_state(self):", "body": "raise NotImplementedError(\"\")", "docstring": "Returns the current endpoint state.", "id": "f4878:c0:m2"} {"signature": "def connection_remote_closed(self, connection, pn_condition):", "body": "LOG.debug(\"\")", "docstring": "Peer has closed its end of the connection.", "id": "f4879:c1:m2"} {"signature": "def _ep_active(self):", "body": "LOG.debug(\"\")if self._handler:with self._callback_lock:self._handler.connection_active(self)", "docstring": "Both ends of the Endpoint have become active.", "id": "f4879:c2:m42"} {"signature": "def create_sender(self, source_address, target_address=None,event_handler=None, name=None, properties=None):", "body": "ident = name or str(source_address)if ident in self._sender_links:raise KeyError(\"\" % ident)session = _SessionProxy(\"\" % ident, self)session.open()sl = session.new_sender(ident)sl.configure(target_address, source_address, event_handler, properties)self._sender_links[ident] = slreturn sl", "docstring": "Factory method for Sender links.", "id": "f4879:c2:m28"} {"signature": "def _not_reentrant(func):", "body": "def wrap(self, *args, **kws):if self._callback_lock and self._callback_lock.in_callback:m = \"\" % funcraise RuntimeError(m)return func(self, *args, **kws)return wrap", "docstring": "Decorator that prevents callbacks from calling into methods that are\n not reentrant", "id": "f4879:c2:m0"} {"signature": "def reject_sender(self, link_handle, pn_condition=None):", "body": "link = self._sender_links.get(link_handle)if not link:raise Exception(\"\" % link_handle)link.reject(pn_condition)link.destroy()", "docstring": "Rejects the SenderLink, and destroys the handle.", "id": "f4879:c2:m30"} {"signature": "def sasl_done(self, connection, pn_sasl, result):", "body": "LOG.debug(\"\")", "docstring": "SASL exchange complete.", "id": "f4879:c1:m7"} {"signature": "def create_receiver(self, target_address, source_address=None,event_handler=None, name=None, properties=None):", "body": "ident = name or str(target_address)if ident in self._receiver_links:raise KeyError(\"\" % ident)session = _SessionProxy(\"\" % ident, self)session.open()rl = session.new_receiver(ident)rl.configure(target_address, source_address, event_handler, properties)self._receiver_links[ident] = rlreturn rl", "docstring": "Factory method for creating Receive links.", "id": "f4879:c2:m31"} {"signature": "@propertydef remote_container(self):", "body": "return self._pn_connection.remote_container", "docstring": "Return the name of the remote container. 
Should be present once the\n connection is active.", "id": "f4879:c2:m6"} {"signature": "def sasl_step(self, connection, pn_sasl):", "body": "LOG.debug(\"\")", "docstring": "DEPRECATED", "id": "f4879:c1:m6"} {"signature": "@propertydef deadline(self):", "body": "return self._next_deadline", "docstring": "Must invoke process() on or before this timestamp.", "id": "f4879:c2:m20"} {"signature": "def _ep_closed(self):", "body": "LOG.debug(\"\", self._name)", "docstring": "Both ends of the endpoint have closed.", "id": "f4880:c6:m11"} {"signature": "def new_receiver(self, name):", "body": "pn_link = self._pn_session.receiver(name)return self.request_receiver(pn_link)", "docstring": "Create a new receiver link.", "id": "f4880:c6:m4"} {"signature": "def link_destroyed(self, link):", "body": "self._links.discard(link)if not self._links:LOG.debug(\"\")self._pn_session.close()self._pn_session.free()self._pn_session = Noneself._connection = None", "docstring": "Link has been destroyed.", "id": "f4880:c6:m6"} {"signature": "@propertydef source_address(self):", "body": "if self._pn_link.is_sender:return self._pn_link.source.addresselse:return self._pn_link.remote_source.address", "docstring": "Return the authorative source of the link.", "id": "f4880:c1:m7"} {"signature": "def _ep_active(self):", "body": "LOG.debug(\"\", self._name)", "docstring": "Both ends of the Endpoint have become active.", "id": "f4880:c6:m9"} {"signature": "def configure(self, target_address, source_address, handler, properties):", "body": "self._handler = handlerself._properties = propertiesdynamic_props = Noneif properties:dynamic_props = properties.get(\"\")mode = _dist_modes.get(properties.get(\"\"))if mode is not None:self._pn_link.source.distribution_mode = modemode = _snd_settle_modes.get(properties.get(\"\"))if mode is not None:self._pn_link.snd_settle_mode = modemode = _rcv_settle_modes.get(properties.get(\"\"))if mode is not None:self._pn_link.rcv_settle_mode = modeif target_address is None:if not self._pn_link.is_sender:raise Exception(\"\")self._pn_link.target.dynamic = Trueif dynamic_props:self._pn_link.target.properties.clear()self._pn_link.target.properties.put_dict(dynamic_props)elif target_address:self._pn_link.target.address = target_addressif source_address is None:if not self._pn_link.is_receiver:raise Exception(\"\")self._pn_link.source.dynamic = Trueif dynamic_props:self._pn_link.source.properties.clear()self._pn_link.source.properties.put_dict(dynamic_props)elif source_address:self._pn_link.source.address = source_address", "docstring": "Assign addresses, properties, etc.", "id": "f4880:c1:m1"} {"signature": "def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,to_dir=os.curdir, delay=):", "body": "to_dir = os.path.abspath(to_dir)try:from urllib.request import urlopenexcept ImportError:from urllib2 import urlopentgz_name = \"\" % versionurl = download_base + tgz_namesaveto = os.path.join(to_dir, tgz_name)src = dst = Noneif not os.path.exists(saveto): try:log.warn(\"\", url)src = urlopen(url)data = src.read()dst = open(saveto, \"\")dst.write(data)finally:if src:src.close()if dst:dst.close()return os.path.realpath(saveto)", "docstring": "Download distribute from a specified location and return its filename\n\n `version` should be a valid distribute version number that is available\n as an egg for download under the `download_base` URL (which should end\n with a '/'). 
`to_dir` is the directory where the egg will be downloaded.\n `delay` is the number of seconds to pause before an actual download\n attempt.", "id": "f4888:m4"} {"signature": "def main(argv, version=DEFAULT_VERSION):", "body": "tarball = download_setuptools()_install(tarball)", "docstring": "Install or upgrade setuptools and EasyInstall", "id": "f4888:m17"} {"signature": "def _extractall(self, path=\"\", members=None):", "body": "import copyimport operatorfrom tarfile import ExtractErrordirectories = []if members is None:members = selffor tarinfo in members:if tarinfo.isdir():directories.append(tarinfo)tarinfo = copy.copy(tarinfo)tarinfo.mode = self.extract(tarinfo, path)if sys.version_info < (, ):def sorter(dir1, dir2):return cmp(dir1.name, dir2.name)directories.sort(sorter)directories.reverse()else:directories.sort(key=operator.attrgetter(''), reverse=True)for tarinfo in directories:dirpath = os.path.join(path, tarinfo.name)try:self.chown(tarinfo, dirpath)self.utime(tarinfo, dirpath)self.chmod(tarinfo, dirpath)except ExtractError:e = sys.exc_info()[]if self.errorlevel > :raiseelse:self._dbg(, \"\" % e)", "docstring": "Extract all members from the archive to the current working\n directory and set owner, modification time and permissions on\n directories afterwards. `path' specifies a different directory\n to extract to. `members' is optional and must be a subset of the\n list returned by getmembers().", "id": "f4888:m16"} {"signature": "def __eq__(self, oth) -> bool:", "body": "return self.name == oth", "docstring": "Use by all __contains__ call when we do some 'in' test", "id": "f4921:c9:m2"} {"signature": "def unify(self, oth_type_def: TypeExprComponent, blhs, brhs) -> TypeExprComponent:", "body": "print(\"\" % (self, oth_type_def))if self.type_def is not None:if type(oth_type_def) is UnknownName:if oth_type_def is None:oth_type_def.type_def = self.type_defreturn self.type_defreturn self.type_def.unify(oth_type_def, blhs, brhs)if self.type_def is None:self.type_def = Overload()if oth_type_def not in self.type_def:self.type_def.append(oth_type_def)return oth_type_def", "docstring": "When we unify an Unknown Name vs another type def we always match", "id": "f4921:c2:m1"} {"signature": "def unify(self, oth_type_def: TypeExprComponent, blhs, brhs) -> TypeExprComponent:", "body": "print(\"\" % (self, oth_type_def))if type(oth_type_def) is not Fun:if type(oth_type_def) is T:return self[].unify(oth_type_def, blhs, brhs)raise \"\"diff_len = len(self) - len(oth_type_def)if diff_len < : return Nonefor a, b in zip(reversed(self), reversed(oth_type_def)):if not a.unify(b, blhs, brhs):return Nonereturn Fun(*self[:diff_len])", "docstring": "When we unify a function vs another type def we match each term and we return the rest.\n\nt1 -> t2 -> t3 ?? t1 -> t2 match and we return t3\nt1 -> t2 ?? 
t1 -> t2 -> t3 does not match\n\nNote: the first element is the return type", "id": "f4921:c5:m0"} {"signature": "@meta.rule(parsing.Parser, \"\")def ignore_cxx(self) -> bool:", "body": "self._stream.save_context()while not self.read_eof():idxref = self._stream.indexif self._stream.peek_char in \"\":while (not self.read_eof()and self._stream.peek_char in \"\"):self._stream.incpos()if self.peek_text(\"\"):while not self.read_eof() and not self.peek_char(\"\"):self._stream.incpos()if not self.read_char(\"\") and self.read_eof():return self._stream.validate_context()if self.peek_text(\"\"):while not self.read_eof() and not self.peek_text(\"\"):self._stream.incpos()if not self.read_text(\"\") and self.read_eof():return self._stream.restore_context()if idxref == self._stream.index:breakreturn self._stream.validate_context()", "docstring": "Consume comments and whitespace characters.", "id": "f4927:m0"} {"signature": "def catend(dst: str, src: str, indent) -> str:", "body": "res = dsttxtsrc = srcif not isinstance(src, str):txtsrc = str(src)for c in list(txtsrc):if len(res) > and res[-] == '':res += (indentable.char_indent * indentable.num_indent) *(indent - ) + celse:res += creturn res", "docstring": "Concatenate two strings, handling '\\n' for indentation.", "id": "f4930:m0"} {"signature": "def list_set_indent(lst: list, indent: int=):", "body": "for i in lst:if isinstance(i, indentable):i.set_indent(indent)if isinstance(i, list):list_set_indent(i, indent)", "docstring": "Recurse into nested lists to set indentation.", "id": "f4930:m1"} {"signature": "def populate_from_sequence(seq: list, r: ref(Edge), sr: state.StateRegister):", "body": "base_state = ridxlast = len(seq) - idx = for m in seq:if isinstance(m, list):for item in m:populate_from_sequence(item, r, sr)elif isinstance(m, MatchExpr):eX = r().get_next_edge(m)if eX is None:sX = Noneif idx != idxlast:sX = state.State(sr)sX.matchDefault(base_state().s)else:sX = base_state().seX = Edge(sX)r().next_edge[id(sX)] = eXm.attach(r().s, sX, sr)r = ref(eX)idx += ", "docstring": "Function that connects the MatchExpr instances of one sequence to each other.", "id": "f4931:m0"} {"signature": "def populate_state_register(all_seq: [list], sr: state.StateRegister) -> Edge:", "body": "s0 = state.State(sr)s0.matchDefault(s0)sr.set_default_state(s0)e0 = Edge(s0)for seq in all_seq:r = ref(e0)populate_from_sequence(seq, r, sr)return e0", "docstring": "Function that creates a state for each MatchExpr instance\n in the given list and connects them to each other.", "id": "f4931:m1"} {"signature": "def to_png_file(self, fname: str):", "body": "cmd = pipes.Template()cmd.append('' % fname, '')with cmd.open('', '') as f:f.write(self.to_dot())", "docstring": "Write a '.png' file.", "id": "f4933:c1:m9"} {"signature": "def resetLivingState(self):", "body": "must_delete = []l = len(self.ls)for idx, ls in zip(range(l), self.ls):ids = id(ls[].thestate())if ids == id(ls[]) and (ls[].have_finish or not ls[].alive):must_delete.append(idx)elif ls[].alive:ls[].alive = Falsefor delete in reversed(must_delete):self.ls.pop(delete)self.init_all()", "docstring": "Keep only one living state on the S0 of each StateRegister.", "id": "f4933:c15:m15"} {"signature": "@meta.hook(EBNF, \"\")def add_optional(self, repeat):", "body": "repeat.functor = parsing.RepOptionalreturn True", "docstring": "Create a tree.RepOptional", "id": "f4934:m13"} {"signature": "def get_rules(self) -> parsing.Node:", "body": "res = Nonetry:res = self.eval_rule('')if not res:self.diagnostic.notify(error.Severity.ERROR,\"\" % 
self._lastRule,error.LocationInfo.from_maxstream(self._stream))raise self.diagnosticexcept error.Diagnostic as d:d.notify(error.Severity.ERROR,\"\" % self._lastRule)raise dreturn res", "docstring": "Parse the DSL and provide a dictionary of all resulting rules.\nCalled by the MetaGrammar class.\n\nTODO: could be done in the rules property of parsing.BasicParser???", "id": "f4934:c0:m0"} {"signature": "@meta.hook(EBNF, \"\")def param_num(self, param, n):", "body": "param.pair = (int(self.value(n)), int)return True", "docstring": "Parse an int in parameter list", "id": "f4934:m17"} {"signature": "@meta.hook(EBNF, \"\")def add_rpt(self, sequence, mod, pt):", "body": "modstr = self.value(mod)if modstr == '':self._stream.restore_context()self.diagnostic.notify(error.Severity.ERROR,\"\",error.LocationInfo.from_stream(self._stream, is_error=True))raise self.diagnosticif modstr == '':self._stream.restore_context()self.diagnostic.notify(error.Severity.ERROR,\"\",error.LocationInfo.from_stream(self._stream, is_error=True))raise self.diagnosticoldnode = sequencesequence.parser_tree = pt.functor(oldnode.parser_tree)return True", "docstring": "Add a repeater to the previous sequence", "id": "f4934:m9"} {"signature": "@meta.hook(EBNF, \"\")def param_str(self, param, s):", "body": "param.pair = (self.value(s).strip(''), str)return True", "docstring": "Parse a str in parameter list", "id": "f4934:m18"} {"signature": "@meta.hook(EBNF, \"\")def add_mod(self, seq, mod):", "body": "modstr = self.value(mod)if modstr == '':seq.parser_tree = parsing.Complement(seq.parser_tree)elif modstr == '':seq.parser_tree = parsing.LookAhead(seq.parser_tree)elif modstr == '':seq.parser_tree = parsing.Neg(seq.parser_tree)elif modstr == '':seq.parser_tree = parsing.Until(seq.parser_tree)return True", "docstring": "Create a tree.{Complement, LookAhead, Neg, Until}", "id": "f4934:m0"} {"signature": "def set_one(chainmap, thing_name, callobject):", "body": "namespaces = reversed(thing_name.split(\"\"))lstname = []for name in namespaces:lstname.insert(, name)strname = ''.join(lstname)chainmap[strname] = callobject", "docstring": "Add a mapping with key thing_name for callobject in chainmap with\n namespace handling.", "id": "f4935:m2"} {"signature": "def directive(directname=None):", "body": "global _directivesclass_dir_list = _directivesdef wrapper(f):nonlocal directnameif directname is None:directname = f.__name__f.ns_name = directnameset_one(class_dir_list, directname, f)return freturn wrapper", "docstring": "Attach a class to a parsing class and register it as a parser directive.\n\n The class is registered with its name unless directname is provided.", "id": "f4935:m7"} {"signature": "@meta.rule(Parser, \"\")def read_integer(self) -> bool:", "body": "if self.read_eof():return Falseself._stream.save_context()c = self._stream.peek_charif c.isdigit():self._stream.incpos()while not self.read_eof():c = self._stream.peek_charif not c.isdigit():breakself._stream.incpos()return self._stream.validate_context()return self._stream.restore_context()", "docstring": "read a number\nRead following BNF rule else return False::\n\n readInteger = [\n ['0'..'9']+\n ]", "id": "f4936:m6"} {"signature": "def get_tag(self, name: str) -> Tag:", "body": "return self.tag_cache[name]", "docstring": "Extract the string previously saved.", "id": "f4936:c1:m12"} {"signature": "def pop_ignore(self) -> bool:", "body": "self._ignores.pop()return True", "docstring": "Remove the last ignore convention", "id": "f4936:c1:m30"} {"signature": "def 
pop_stream(self):", "body": "s = self._streams.pop()self.clean_tmp(s)", "docstring": "Pop the last Stream pushed on to the parser stack.", "id": "f4936:c1:m9"} {"signature": "def begin_tag(self, name: str) -> Node:", "body": "self.tag_cache[name] = Tag(self._stream, self._stream.index)return True", "docstring": "Save the current index under the given name.", "id": "f4936:c1:m10"} {"signature": "@meta.rule(Parser, \"\")def read_hex_integer(self) -> bool:", "body": "if self.read_eof():return Falseself._stream.save_context()c = self._stream.peek_charif c.isdigit() or ('' <= c.lower() and c.lower() <= ''):self._stream.incpos()while not self.read_eof():c = self._stream.peek_charif not (c.isdigit() or ('' <= c.lower() and c.lower() <= '')):breakself._stream.incpos()return self._stream.validate_context()return self._stream.restore_context()", "docstring": "read a hexadecimal number\nRead the following BNF rule else return False::\n\n readHexInteger = [\n [ '0'..'9' | 'a'..'f' | 'A'..'F' ]+\n ]", "id": "f4936:m4"} {"signature": "def read_char(self, c: str) -> bool:", "body": "if self.read_eof():return Falseself._stream.save_context()if c == self._stream.peek_char:self._stream.incpos()return self._stream.validate_context()return self._stream.restore_context()", "docstring": "Consume the c head byte, increment current index and return True\nelse return False. It use peekchar and it's the same as '' in BNF.", "id": "f4936:c1:m22"} {"signature": "@classmethoddef set_directives(cls, directives: dict) -> bool:", "body": "meta._directives = meta._directives.new_child()for dir_name, dir_pt in directives.items():meta.set_one(meta._directives, dir_name, dir_pt)dir_pt.ns_name = dir_namereturn True", "docstring": "Merge internal directives set with the given directives.\nFor working directives, attach it only in the dsl.Parser class", "id": "f4936:c1:m16"} {"signature": "@classmethoddef set_rules(cls, rules: dict) -> bool:", "body": "cls._rules = cls._rules.new_child()for rule_name, rule_pt in rules.items():if '' not in rule_name:rule_name = cls.__module__+ '' + cls.__name__+ '' + rule_namemeta.set_one(cls._rules, rule_name, rule_pt)return True", "docstring": "Merge internal rules set with the given rules", "id": "f4936:c1:m14"} {"signature": "@propertydef nstream(self) -> int:", "body": "return len(self._streams)", "docstring": "Return the number of opened stream", "id": "f4936:c1:m3"} {"signature": "def ignore_blanks(self) -> bool:", "body": "self._stream.save_context()if not self.read_eof() and self._stream.peek_char in \"\":while (not self.read_eof()and self._stream.peek_char in \"\"):self._stream.incpos()return self._stream.validate_context()return self._stream.validate_context()", "docstring": "Consume whitespace characters.", "id": "f4936:c1:m28"} {"signature": "def eval_hook(self, name: str, ctx: list) -> Node:", "body": "if name not in self.__class__._hooks:self.diagnostic.notify(error.Severity.ERROR,\"\" % name,error.LocationInfo.from_stream(self._stream, is_error=True))raise self.diagnosticself._lastRule = '' + nameres = self.__class__._hooks[name](self, *ctx)if type(res) is not bool:raise TypeError(\"\" % name)return res", "docstring": "Evaluate the hook by its name", "id": "f4936:c1:m18"} {"signature": "def read_until_eof(self) -> bool:", "body": "if self.read_eof():return Trueself._stream.save_context()while not self.read_eof():self._stream.incpos()return self._stream.validate_context()", "docstring": "Consume all the stream. 
Same as EOF in BNF.", "id": "f4936:c1:m24"} {"signature": "@meta.rule(BasicParser, \"\")def read_eof(self) -> bool:", "body": "return self._stream.index == self._stream.eos_index", "docstring": "Returns true if reached end of the stream.", "id": "f4936:m2"} {"signature": "def peek_text(self, text: str) -> bool:", "body": "start = self._stream.indexstop = start + len(text)if stop > self._stream.eos_index:return Falsereturn self._stream[self._stream.index:stop] == text", "docstring": "Same as readText but doesn't consume the stream.", "id": "f4936:c1:m20"} {"signature": "@propertydef max_readed_position(self) -> Position:", "body": "return Position(self._maxindex, self._maxline, self._maxcol)", "docstring": "The index of the deepest character readed.", "id": "f4939:c0:m6"} {"signature": "@propertydef eos_index(self) -> int:", "body": "return self._len", "docstring": "End Of Stream index.", "id": "f4939:c2:m4"} {"signature": "def validate_context(self) -> bool:", "body": "del self._contexts[-]return True", "docstring": "Discard previous saved position.", "id": "f4939:c2:m14"} {"signature": "def restore_context(self) -> bool:", "body": "self._cursor.position = self._contexts.pop()return False", "docstring": "Rollback to previous saved position.", "id": "f4939:c2:m13"} {"signature": "@propertydef last_readed_line(self) -> str:", "body": "mpos = self._cursor.max_readed_positionmindex = mpos.indexprevline = mindex - if mindex == self.eos_index else mindexwhile prevline >= and self._content[prevline] != '':prevline -= nextline = mindexwhile nextline < self.eos_index and self._content[nextline] != '':nextline += last_line = self._content[prevline + :nextline]return last_line", "docstring": "Usefull string to compute error message.", "id": "f4939:c2:m9"} {"signature": "def step_prev_line(self):", "body": "if len(self._eol) > :self.position = self._eol.pop()", "docstring": "Sets cursor as end of previous line.", "id": "f4939:c0:m10"} {"signature": "@propertydef position(self) -> Position:", "body": "return Position(self._index, self._lineno, self._col_offset)", "docstring": "The current position of the cursor.", "id": "f4939:c0:m4"} {"signature": "def check(self, ndict: dict, info=\"\") -> bool:", "body": "def iscycle(thing, ndict: dict, info: str) -> bool:idthing = id(thing)ndict[info] = idthingif idthing not in ndict:ndict[idthing] = \"\" % (type(thing), info)return Falseelse:ndict[idthing] += \"\" % (type(thing), info)return Truedef recurs(thing, ndict: dict, info: str) -> bool:if not iscycle(thing, ndict, info):res = Falseif isinstance(thing, list):idx = for i in thing:res |= recurs(i, ndict, \"\" % (info, idx))idx += elif isinstance(thing, Node):res |= thing.check(ndict, info)elif isinstance(thing, dict):for k, v in thing.items():res |= recurs(v, ndict, \"\" % (info, k))return resreturn Trueif len(ndict) == :ndict[''] = id(self)info = ''if not iscycle(self, ndict, info):res = Falseif len(self) > :if hasattr(self, ''):keys = list(self.keys())for k in keys:ndict[\"\" + repr(k) + \"\"] = id(self[k])res |= recurs(self[k], ndict, \"\" % (info, k))keys = list(vars(self).keys())for k in keys:ndict[\"\" + k] = id(getattr(self, k))res |= recurs(getattr(self, k), ndict, \"\" % (info, k))return resreturn True", "docstring": "Debug method, help detect cycle and/or\nother incoherence in a tree of Node", "id": "f4941:c0:m2"} {"signature": "def values(self):", "body": "tmp = selfwhile tmp is not None:yield tmp.datatmp = tmp.next", "docstring": "in order", "id": "f4941:c6:m13"} {"signature": "def 
after_parse(self, node: parsing.Node) -> parsing.Node:", "body": "return node", "docstring": "If you want to do some stuff after parsing, overload this...", "id": "f4942:c1:m0"} {"signature": "@meta.hook(BasicParser, \"\")def pred_neq(self, n, val):", "body": "v1 = n.valuev2 = valif hasattr(val, ''):v2 = val.valueif isinstance(v1, int) and not isinstance(v2, int):return v1 != int(v2)return v1 != v2", "docstring": "Test if a node set with setint or setstr not equal a certain value\n\nexample::\n\n R = [\n __scope__:n\n ['a' #setint(n, 12) | 'b' #setint(n, 14)]\n C\n [#neq(n, 12) D]\n ]", "id": "f4943:m3"} {"signature": "@meta.hook(BasicParser, \"\")def dump_nodes(self):", "body": "print(\"\")try:print(\"\")for k, v in self.id_cache.items():print(\"\" % (k, v))print(\"\")for k, v in self.tag_cache.items():print(\"\" % (k, v))print(\"\")for k, v in self.rule_nodes.items():txt = \"\" % (k, id(v))if k in self.tag_cache:tag = self.tag_cache[k]txt += \"\" % tagk = \"\" % (tag._begin, tag._end)if k in self._stream.value_cache:txt += \"\" % self._stream.value_cache[k]print(txt)except Exception as err:print(\"\" % err)import syssys.stdout.flush()return True", "docstring": "Dump tag,rule,id and value cache. For debug.\n\nexample::\n\n R = [\n #dump_nodes\n ]", "id": "f4946:m0"} {"signature": "@meta.hook(BasicParser, \"\")def set_node_as_int(self, dst, src):", "body": "dst.value = self.value(src)return True", "docstring": "Set a node to a value captured from another node\n\nexample::\n\n R = [\n In : node #setcapture(_, node)\n ]", "id": "f4947:m1"} {"signature": "@meta.hook(BasicParser, \"\")def get_subnode(self, dst, ast, expr):", "body": "dst.value = eval('' + expr)return True", "docstring": "get the value of subnode\n\nexample::\n\n R = [\n __scope__:big getsomethingbig:>big\n #get(_, big, '.val') // copy big.val into _\n ]", "id": "f4947:m4"} {"signature": "@meta.add_method(functors.SkipIgnore)def to_cython(self, genstate) -> fmt.indentable:", "body": "return ''", "docstring": "TODO: A quite important part of C transform... 
rethink directive/decoration/etc...", "id": "f4948:m3"} {"signature": "@meta.add_method(Translator)def to_fmt(self, with_from=False) -> fmt.indentable:", "body": "txt = fmt.sep(\"\", [fmt.sep(\"\",[self._type_source,\"\",self._type_target,'',self._fun.to_fmt()]),self._notify.get_content(with_from)])return txt", "docstring": "Return a Fmt representation of Translator for pretty-printing", "id": "f4952:m3"} {"signature": "def get_resolved_names(self, type_name: TypeName) -> list:", "body": "if not isinstance(type_name, TypeName):raise Exception(\"\"% type(type_name))rnames = []for name in type_name.components:if name not in self.resolution:raise Exception(\"\" % name)rname = self.resolution[name]if rname is not None:rname = rname().show_name()else:rname = namernames.append(rname)return rnames", "docstring": "Use self.resolution to subsitute type_name.\nAllow to instanciate polymorphic type ?1, ?toto", "id": "f4956:c0:m20"} {"signature": "def set_parent(self, parent) -> object:", "body": "ret = selfif parent is not None:ret = self._sig.set_parent(parent)self.resolve()elif not hasattr(self, ''):self.parent = Nonereturn ret", "docstring": "When we add a parent (from Symbol), don't forget to resolve.", "id": "f4956:c0:m16"} {"signature": "def use_variadic_types(self, list_type: [TypeName]):", "body": "self._variadic_types = list_typeself.resolve()", "docstring": "Attach a list of types for extra variadic argument of a call", "id": "f4956:c0:m18"} {"signature": "def set_parent(self, parent) -> object:", "body": "return self._sig.set_parent(parent)", "docstring": "Forward to intern Signature", "id": "f4956:c0:m12"} {"signature": "def internal_name(self):", "body": "unq = super().internal_name()if self.tret is not None:unq += \"\" + self.tretreturn unq", "docstring": "Return the unique internal name", "id": "f4957:c0:m1"} {"signature": "def __getstate__(self):", "body": "state = self.__dict__.copy()del state['']return state", "docstring": "For pickle don't handle weakrefs...", "id": "f4959:c0:m1"} {"signature": "def get_scope_list(self) -> list:", "body": "lstparent = [self]p = self.get_parent()while p is not None:lstparent.append(p)p = p.get_parent()return lstparent", "docstring": "Return the list of all contained scope from global to local", "id": "f4959:c0:m4"} {"signature": "def internal_name(self) -> str:", "body": "unq = \"\".join(self.get_scope_names())return unq", "docstring": "Returns the namespace's internal_name.", "id": "f4959:c0:m7"} {"signature": "def infer_block(self, body, diagnostic=None):", "body": "for e in body:e.infer_node = InferNode(parent=self.infer_node)e.infer_type(diagnostic=diagnostic)", "docstring": "Infer type on block is to type each of is sub-element", "id": "f4960:c1:m3"} {"signature": "def infer_subexpr(self, expr, diagnostic=None):", "body": "expr.infer_node = InferNode(parent=self.infer_node)expr.infer_type(diagnostic=diagnostic)", "docstring": "Infer type on the subexpr", "id": "f4960:c1:m4"} {"signature": "def type_algos(self) -> ('','', ['']):", "body": "raise Exception((\"\" +\"\" +\"\") % type(self).__name__)", "docstring": "Sub class must return a Tuple of 3 elements:\n - the method to use to infer type.\n - the list of params to used when infer a type.\n - the method to use when feedback a type.\n\nThis is useful to connect AST members to generic algo or\nto overload some specific semantic for your language.", "id": "f4960:c1:m0"} {"signature": "def __isub__(self, sig: Scope) -> Scope:", "body": "return self.difference_update(sig)", "docstring": "-= 
operator", "id": "f4964:c1:m19"} {"signature": "def symmetric_difference_update(self, oset: Scope) -> Scope:", "body": "skey = set()keys = list(self._hsig.keys())for k in keys:if k in oset:skey.add(k)for k in oset._hsig.keys():if k not in skey:self._hsig[k] = oset.get(k)for k in skey:del self._hsig[k]return self", "docstring": "Remove common values\n and Update specific values from another Set", "id": "f4964:c1:m24"} {"signature": "def union(self, sig: Scope) -> Scope:", "body": "new = Scope(sig=self._hsig.values(), state=self.state)new |= sigreturn new", "docstring": "Create a new Set produce by the union of 2 Set", "id": "f4964:c1:m14"} {"signature": "def __init__(self, name: str=None, sig: [Signature]=None,state=StateScope.FREE, is_namespace=True):", "body": "if name is not None and not isinstance(name, str):raise TypeError(\"\")super().__init__(name)self.need_feedback = Falseself.state = stateself.is_namespace = is_namespaceself.mapTypeTranslate = MapSourceTranslate()self.astTranslatorInjector = Noneself._ntypes = self._nvars = self._nfuns = self._hsig = {}if sig is not None:if isinstance(sig, Signature) or isinstance(sig, Scope):self.add(sig)elif len(sig) > :self.update(sig)", "docstring": "Unnamed scope for global scope\n\n A Scope have basically 3 possibles states:\n\n FREE: it's a standalone Scope, generally the global Scope\n LINKED: the Scope is related to another Scope for type resolution,\n like compound statement\n EMBEDDED: the Scope was added into another Scope,\n it forwards all 'in' calls...like namespacing\n\n A Scope could or couldn't act like a namespace.\n All Signature added into a 'namespaced' Scope was prefixed\n by the Scope name.", "id": "f4964:c1:m1"} {"signature": "def __xor__(self, sig: Scope) -> Scope:", "body": "return self.symmetric_difference(sig)", "docstring": "^ operator", "id": "f4964:c1:m25"} {"signature": "def __repr__(self) -> str:", "body": "return repr(self._hsig)", "docstring": "Internal representation", "id": "f4964:c1:m8"} {"signature": "def values(self) -> [Signature]:", "body": "if self.state == StateScope.EMBEDDED and self.parent is not None:return list(self._hsig.values()) + list(self.parent().values())else:return self._hsig.values()", "docstring": "Retrieve all values", "id": "f4964:c1:m33"} {"signature": "def get_all_polymorphic_return(self) -> bool:", "body": "lst = []for s in self.values():if hasattr(s, '') and s.tret.is_polymorphic:lst.append(EvalCtx.from_sig(s))rscope = Scope(sig=lst, state=StateScope.LINKED, is_namespace=False)rscope.set_parent(self)return rscope", "docstring": "For now, polymorphic return type are handle by symbol artefact.\n\n --> possible multi-polymorphic but with different constraint attached!", "id": "f4964:c1:m41"} {"signature": "def __and__(self, sig: Scope) -> Scope:", "body": "return self.intersection(sig)", "docstring": "& operator", "id": "f4964:c1:m17"} {"signature": "def remove(self, it: Signature) -> bool:", "body": "txt = it.internal_name()if txt not in self._hsig:raise KeyError(it.show_name() + '')sig = self._hsig[txt]if isinstance(sig, Scope):sig.state = StateScope.LINKEDdel self._hsig[txt]return True", "docstring": "Remove it but raise KeyError if not found", "id": "f4964:c1:m28"} {"signature": "def __contains__(self, s: Signature) -> bool:", "body": "from pyrser.type_system.type import Typefound = Falsetxt = \"\"if isinstance(s, Signature):txt = s.internal_name()elif isinstance(s, Type):txt = s.type_nameelif isinstance(s, str):txt = sfound = (txt in self._hsig)if not found and self.parent is not 
None:if self.state != StateScope.FREE and isinstance(s, Type):found = (s.type_name in self.parent())if self.state == StateScope.EMBEDDED:found = (txt in self.parent())return found", "docstring": "check if a Signature or a Type is declared in a Scope", "id": "f4964:c1:m10"} {"signature": "def add(self, it: Signature) -> bool:", "body": "if isinstance(it, Scope):it.state = StateScope.EMBEDDEDtxt = it.internal_name()it.set_parent(self)if self.is_namespace:txt = it.internal_name()if txt == \"\":txt = '' + str(len(self._hsig))if txt in self._hsig:raise KeyError(\"\" % txt)self._hsig[txt] = itself.__update_count()return True", "docstring": "Add it to the Set", "id": "f4964:c1:m27"} {"signature": "def __update_count(self):", "body": "self._ntypes = self.count_types()self._nvars = self.count_vars()self._nfuns = self.count_funs()", "docstring": "Update internal counters", "id": "f4964:c1:m7"} {"signature": "@meta.add_method(node.Node)def to_yml(self):", "body": "pp = fmt.tab([])to_yml_item(self, pp.lsdata, \"\")return str(pp)", "docstring": "Allow to get the YML string representation of a Node.::\n\n from pyrser.passes import to_yml\n\n t = Node()\n ...\n print(str(t.to_yml()))", "id": "f4967:m0"} {"signature": "def visit_Seq(self, node: parsing.Seq) -> [ast.stmt] or ast.expr:", "body": "exprs, stmts = [], []for clause in node.ptlist:clause_ast = self.visit(clause)if isinstance(clause_ast, ast.expr):exprs.append(clause_ast)else:if exprs:stmts.extend(self.combine_exprs_for_clauses(exprs))exprs = []stmts.extend(self._clause(clause_ast))if not stmts:return ast.BoolOp(ast.And(), exprs)if exprs:stmts.extend(self.combine_exprs_for_clauses(exprs))return stmts", "docstring": "Generates python code for clauses.\n\n #Continuous clauses which can can be inlined are combined with and\n clause and clause\n\n if not clause:\n return False\n if not clause:\n return False", "id": "f4969:c0:m12"} {"signature": "def __exit_scope(self) -> ast.stmt:", "body": "if self.in_optional:return ast.Pass()if self.in_try:return ast.Raise(ast.Call(ast.Name('', ast.Load()), [], [], None, None),None)if self.in_loop:return ast.Break()return ast.Return(ast.Name('', ast.Load()))", "docstring": "Create the appropriate scope exiting statement.\n\n The documentation only shows one level and always uses\n 'return False' in examples.\n\n 'raise AltFalse()' within a try.\n 'break' within a loop.\n 'return False' otherwise.", "id": "f4969:c0:m2"} {"signature": "def visit_CallTrue(self, node: parsing.CallTrue) -> ast.expr:", "body": "return ast.Lambda(ast.arguments([], None, None, [], None, None, [], []),ast.BoolOp(ast.Or(),[self.visit_Call(node),ast.Name('', ast.Load())]))", "docstring": "Generates python code calling the function and returning True.\n\n lambda: fn(*args) or True", "id": "f4969:c0:m5"} {"signature": "def visit_Call(self, node: parsing.Call) -> ast.expr:", "body": "return ast.Call(ast.Attribute(ast.Name('', ast.Load),node.callObject.__name__,ast.Load()),[ast.Str(param) for param in node.params],[],None,None)", "docstring": "Generates python code calling the function.\n\n fn(*args)", "id": "f4969:c0:m4"} {"signature": "def visit_Rule(self, node: parsing.Rule) -> ast.expr:", "body": "return ast.Call(ast.Attribute(ast.Name('', ast.Load()),'', ast.Load()),[ast.Str(node.name)], [], None, None)", "docstring": "Generates python code calling a rule.\n\n self.evalRule('rulename')", "id": "f4969:c0:m7"} {"signature": "def visit_Rep1N(self, node: parsing.Rep0N) -> [ast.stmt]:", "body": "clause = self.visit(node.pt)if isinstance(clause, 
ast.expr):return (self._clause(clause) + self.visit_Rep0N(node))self.in_loop += clause = self._clause(self.visit(node.pt))self.in_loop -= return self._clause(self.visit(node.pt)) + [ast.While(ast.Name('', ast.Load()), clause, [])]", "docstring": "Generates python code for a clause repeated 1 or more times.\n\n \n while True:\n ", "id": "f4969:c0:m15"} {"signature": "def parse_attrs(self, tag):", "body": "if tag.name in ATTR_WHITELIST.keys():attrs = copy(tag.attrs)for attr, value in attrs.items():if attr in ATTR_WHITELIST[tag.name]:tag.attrs[attr] = self._parse_attr(tag.name, attr, value)else:del tag.attrs[attr]else:tag.attrs = {}", "docstring": "Reject attributes not defined in ATTR_WHITELIST.", "id": "f4980:c0:m9"} {"signature": "def remove_comments(self, tag):", "body": "if tag.get('', '').startswith(''):tag.parent.extract()", "docstring": "Remove comments.", "id": "f4980:c0:m2"} {"signature": "def unwrap_span(self, tag):", "body": "tag.unwrap()", "docstring": "Remove span tags while preserving contents.", "id": "f4980:c0:m8"} {"signature": "def create_italic(self, tag):", "body": "style = tag.get('')if style and '' in style:tag.wrap(self.soup.new_tag(''))", "docstring": "See if span tag has italic style and wrap with em tag.", "id": "f4980:c0:m5"} {"signature": "def _parse_attr(self, tagname, attr, value):", "body": "if tagname == '' and attr == '':return self._parse_href(value)else:return value", "docstring": "Parse attribute. Delegate to href parser for hrefs, otherwise return\nvalue.", "id": "f4980:c0:m15"} {"signature": "@injector_pointerdef zprTYSyMkLEC():", "body": "pkg = Package(\"\")class Container(Injector):foo = pkg.injected.Container.foobar = pkg.injected.Container.barbaz = return Container", "docstring": "Attribute access submodule.", "id": "f5017:m9"} {"signature": "@package_definitionsdef uHSfYcZjGSJQ():", "body": "pkg = Package(\"\")sub = Package(\"\")class Container(Injector):itself = pkgsubmodule = subinstance = sub.Barinstance_method = sub.Bar.dofunction = sub.functionvariable = sub.variablekeep_class = sub.Bara = -b = return Container", "docstring": "Constructor argument submodule.", "id": "f5017:m7"} {"signature": "@injector_pointerdef dqXJgFoftQja():", "body": "injected = Package(\"\")class Container(Injector):foo = injected.Container.foobar = injected.Container.barbaz = return Container", "docstring": "Constructor argument submodule.", "id": "f5017:m10"} {"signature": "@item_accessdef dc4fedcd09d8():", "body": "class Container(Injector):foo = {(\"\", ): }bar = this.foo[(\"\", )]result = Container.barreturn result", "docstring": "Get items from dict with tuple keys.", "id": "f5019:m9"} {"signature": "@too_manydef rN3suiVzhqMM():", "body": "Injector.let(SubContainer=Injector.let(foo=(this << ).bar)).SubContainer.foo", "docstring": "Let notation with nested layer.", "id": "f5019:m29"} {"signature": "@too_manydef ww6xNI4YrNr6():", "body": "Injector.let(foo=(this << ).bar).foo", "docstring": "Let notation.", "id": "f5019:m28"} {"signature": "@call.xfaildef zVaXyVseCxYS():", "body": "class Container(Injector):foo = this.bar()bar = lambda: return Container", "docstring": "Attribute call.", "id": "f5019:m11"} {"signature": "@item_accessdef ab4cdbf60b2f():", "body": "class Container(Injector):foo = {\"\": {\"\": }}class SubContainer(Injector):class SubSubContainer(Injector):spam = (this << ).foo[\"\"][\"\"]result = Container.SubContainer.SubSubContainer.spamreturn result", "docstring": "Get item from the outer container of any depth level.", "id": "f5019:m6"} {"signature": 
"@attribute_errordef t1jn9RI9v42t():", "body": "class Container(Injector):foo = this.barContainer.foo", "docstring": "Declarative Injector.", "id": "f5019:m31"} {"signature": "@negative_integersdef nvm3ybp98vGm():", "body": "this << ", "docstring": "Zero.", "id": "f5019:m24"} {"signature": "@containersdef xPa7isagt3Lq(app):", "body": "@contrib.shared_taskclass Container(Injector):name = \"\"run = Runreturn Container", "docstring": "Shared task decorator.", "id": "f5020:m2"} {"signature": "@make_signaturedef u4kZae2NSFhE(container):", "body": "class NewContainer(container):options = {\"\": }sign = NewContainer.si(, , debug=True)return sign", "docstring": "Immutable shortcut signature.", "id": "f5020:m13"} {"signature": "@make_signaturedef cgTE4xh2ZSVI(container):", "body": "sign = container.signature((, ), {\"\": True}, immutable=True, countdown=)return sign", "docstring": "Verbose signature.", "id": "f5020:m8"} {"signature": "@lowest_containerdef heSHjuBBFVLp():", "body": "return Injector.let(bar=(this << ).foo)", "docstring": "Let notation.", "id": "f5022:m12"} {"signature": "@subcontainerdef iGphUpthTooT():", "body": "class SubContainer(Injector):bar = (this << ).fooreturn SubContainer", "docstring": "Declarative injector.", "id": "f5022:m3"} {"signature": "@attribute_assignmentdef pHfF0rbEjCsV():", "body": "Container = Injector.let()Container.foo = ", "docstring": "Let notation.", "id": "f5023:m25"} {"signature": "@attribute_errordef e2f16596a652():", "body": "class Foo(Injector):passFoo.let().test", "docstring": "Let notation from subclass.", "id": "f5023:m50"} {"signature": "@inheritance_orderdef aa10c7747a1f(Container1, Container2, Container3):", "body": "class Foo(Container1, Container2, Container3):passassert Foo.x == ", "docstring": "Inheritance.", "id": "f5023:m38"} {"signature": "@deny_kwargsdef e281099be65d(Foo):", "body": "class Summator(Injector):foo = Fookwargs = {\"\": }", "docstring": "Declarative injector.", "id": "f5023:m61"} {"signature": "@deny_varargsdef f7ef2aa82c18(Foo):", "body": "Injector.let(foo=Foo, args=(, , ))", "docstring": "Let notation.", "id": "f5023:m59"} {"signature": "@has_attributedef zlZoLka31ndk():", "body": "return Injector.let(foo=)", "docstring": "Let notation.", "id": "f5023:m56"} {"signature": "@attribute_errordef f9c50c81e8c9():", "body": "Foo = Injector.let()Foo.test", "docstring": "Let notation.", "id": "f5023:m49"} {"signature": "@attribute_assignmentdef tQeRzD5ZsyTm():", "body": "del Injector.let", "docstring": "Delete attribute from `Injector` directly.", "id": "f5023:m28"} {"signature": "@deny_varargs_kwargsdef c4362558f312(Foo):", "body": "Injector.let(foo=Foo, args=(, , ), kwargs={\"\": })", "docstring": "Let notation.", "id": "f5023:m65"} {"signature": "@multiple_inheritancedef efdc426cd096(Foo, FooContainer, BarContainer, BazContainer):", "body": "assert isinstance((FooContainer & BarContainer & BazContainer).baz.bar.foo, Foo)", "docstring": "Inplace creation.", "id": "f5023:m36"} {"signature": "@incomplete_dependenciesdef c4e7ecf75167():", "body": "class Bar(object):def __init__(self, test, two=):self.test = testself.two = twoclass Foo(Injector):bar = BarFoo.bar", "docstring": "Keyword arguments in the constructor.", "id": "f5023:m52"} {"signature": "@cls_named_argumentsdef dad79637580d(Foo, Bar):", "body": "class Container(Injector):bar = Bar", "docstring": "Declarative injector.", "id": "f5023:m75"} {"signature": "@deny_calldef a95940f44400():", "body": "class Foo(Injector):passFoo()", "docstring": "Subclass call.", "id": 
"f5023:m71"} {"signature": "@deny_calldef ce52d740af31():", "body": "Injector()", "docstring": "Direct call.", "id": "f5023:m70"} {"signature": "@long_lowest_containerdef qRcNcKzedWaI():", "body": "class SubSubContainer(Injector):bar = (this << ).fooreturn SubSubContainer", "docstring": "Declarative injector.", "id": "f5024:m26"} {"signature": "@long_two_levelsdef eHyErh9kExHG(middle):", "body": "class Container(Injector):foo = this.SubContainer.bazSubContainer = middleContainer.foo", "docstring": "Declarative injector.", "id": "f5024:m22"} {"signature": "@flat_injectordef n8NHZqiZN43Q():", "body": "Injector.let(foo=this.foo).foo", "docstring": "Let notation. Link to self.", "id": "f5024:m2"} {"signature": "@subcontainerdef nFibPCOxGsrX():", "body": "return Injector.let(bar=(this << ).foo)", "docstring": "Let notation.", "id": "f5024:m11"} {"signature": "@complex_two_levelsdef bCw8LPUeVK6J(middle):", "body": "Injector.let(foo=this.SubContainer.SubSubContainer.bar, SubContainer=middle).foo", "docstring": "Let notation.", "id": "f5024:m15"} {"signature": "@items.xfail def t41yMywZuPhA():", "body": "SubContainer = Injector.let(foo=(this << ).bar[\"\"].foo)Injector.let(SubContainer=SubContainer, bar={\"\": SubContainer}).SubContainer.foo", "docstring": "Let notation.", "id": "f5024:m40"} {"signature": "@subcontainer2def rRsNsCaBSxke():", "body": "return Injector.let(baz=(this << ).SubContainer1.bar)", "docstring": "Let notation.", "id": "f5024:m36"} {"signature": "@complex_lowest_containerdef epoadTufdhne():", "body": "pkg = Package(\"\")return pkg.injected.SubSubContainer", "docstring": "Package link.", "id": "f5024:m20"} {"signature": "@flat_injectordef ySnRrxW6M79T():", "body": "Injector.let(foo=this.bar, bar=this.foo).foo", "docstring": "Let notation. 
Complex loop.", "id": "f5024:m4"} {"signature": "@deny_kwargsdef puELUDZLxkDG(arg):", "body": "class Container(Injector):func = arg", "docstring": "Declarative injector.", "id": "f5026:m7"} {"signature": "@complex_circle_depsdef d9c4e136c92c(Foo, Bar):", "body": "class First(Injector):foo = Fooclass Second(First):bar = BarSecond.foo", "docstring": "Declarative injector with inheritance.", "id": "f5028:m11"} {"signature": "@circle_defsdef kHqAxHovWKtI():", "body": "@valuedef Foo(foo):passreturn Foo", "docstring": "Value.", "id": "f5028:m5"} {"signature": "@complex_circle_depsdef b54832f696e9(Foo, Bar):", "body": "Summator = Injector.let(foo=Foo, bar=Bar)Summator.foo", "docstring": "Let notation.", "id": "f5028:m12"} {"signature": "@long_circle_defs_bazdef uaOWixpAMVma():", "body": "class Baz(object):def __init__(self, foo):passreturn Baz", "docstring": "Class.", "id": "f5028:m43"} {"signature": "@long_circle_defs_bardef hpRbxUtEWyGJ():", "body": "@operationdef Bar(baz):passreturn Bar", "docstring": "Operation.", "id": "f5028:m38"} {"signature": "@long_circle_defs_bazdef fvMICnYvGZlw():", "body": "@operationdef Baz(foo):passreturn Baz", "docstring": "Operation.", "id": "f5028:m44"} {"signature": "@complex_circle_defs_foodef fkedDYYeueXo():", "body": "pkg = Package(\"\")return pkg.circles.complex_value.Foo", "docstring": "Package link to value.", "id": "f5028:m19"} {"signature": "@circle_defsdef xoGkuXokhXpZ():", "body": "pkg = Package(\"\")return pkg.circles.simple_class.Foo", "docstring": "Package link to class.", "id": "f5028:m6"} {"signature": "@circle_depsdef e4b38a38de7e(Foo):", "body": "Summator = Injector.let(foo=Foo)Summator.foo", "docstring": "Let notation.", "id": "f5028:m2"} {"signature": "@complex_circle_defs_bardef uEevbDxHVHfN():", "body": "class Bar(object):def __init__(self, foo):passreturn Bar", "docstring": "Class.", "id": "f5028:m20"} {"signature": "@complex_circle_defs_bardef trvcvfPoOBEv():", "body": "pkg = Package(\"\")return pkg.circles.complex_class.Bar", "docstring": "Package link to class.", "id": "f5028:m23"} {"signature": "@long_circle_defs_bardef mLsXYSzlYPRO():", "body": "@valuedef Bar(baz):passreturn Bar", "docstring": "Value.", "id": "f5028:m39"} {"signature": "@circle_defsdef gjhRaqkLmRmy():", "body": "@operationdef Foo(foo):passreturn Foo", "docstring": "Operation.", "id": "f5028:m4"} {"signature": "@long_circle_depsdef fc13db5b9fda(Foo, Bar, Baz):", "body": "class First(Injector):foo = Fooclass Second(First):bar = Barbaz = BazSecond.foo", "docstring": "Declarative injector with inheritance.", "id": "f5028:m28"} {"signature": "@deny_kwargsdef jbfjlQveNjrZ(arg):", "body": "Injector.let(func=arg)", "docstring": "Let notation.", "id": "f5029:m8"} {"signature": "def register(injector):", "body": "if \"\" not in injector:injector.fixtureoptions = {\"\": injector.name}for option in [\"\", \"\", \"\", \"\"]:if option in injector:options[option] = getattr(injector, option)@pytest.fixture(**options)def __fixture(request):return injector.let(request=request).fixture__fixture.injector = injectorreturn __fixture", "docstring": "Register Py.test fixture performing injection in it's scope.", "id": "f5031:m0"} {"signature": "def api_view(injector):", "body": "handler = create_handler(APIView, injector)apply_http_methods(handler, injector)apply_api_view_methods(handler, injector)return injector.let(as_view=handler.as_view)", "docstring": "Create DRF class-based API view from injector class.", "id": "f5038:m0"} {"signature": "def model_view_set(injector):", "body": "handler 
= create_handler(ModelViewSet, injector)apply_api_view_methods(handler, injector)apply_generic_api_view_methods(handler, injector)apply_model_view_set_methods(handler, injector)return injector.let(as_viewset=lambda: handler)", "docstring": "Create DRF model view set from injector class.", "id": "f5038:m2"} {"signature": "@classmethoddef let(cls, **kwargs):", "body": "return type(cls.__name__, (cls,), kwargs)", "docstring": "Produce new Injector with some dependencies overwritten.", "id": "f5054:m1"} {"signature": "def _get_ngrams(n, text):", "body": "ngram_set = set()text_length = len(text)max_index_ngram_start = text_length - nfor i in range(max_index_ngram_start + ):ngram_set.add(tuple(text[i:i + n]))return ngram_set", "docstring": "Calcualtes n-grams.\n\n Args:\n n: which n-grams to calculate\n text: An array of tokens\n\n Returns:\n A set of n-grams", "id": "f5065:m0"} {"signature": "def rouge_l_summary_level(evaluated_sentences, reference_sentences):", "body": "if len(evaluated_sentences) <= or len(reference_sentences) <= :raise ValueError(\"\")m = len(set(_split_into_words(reference_sentences)))n = len(set(_split_into_words(evaluated_sentences)))union_lcs_sum_across_all_references = union = set()for ref_s in reference_sentences:lcs_count, union = _union_lcs(evaluated_sentences,ref_s,prev_union=union)union_lcs_sum_across_all_references += lcs_countllcs = union_lcs_sum_across_all_referencesr_lcs = llcs / mp_lcs = llcs / nbeta = p_lcs / (r_lcs + )num = ( + (beta**)) * r_lcs * p_lcsdenom = r_lcs + ((beta**) * p_lcs)f_lcs = num / (denom + )return {\"\": f_lcs, \"\": p_lcs, \"\": r_lcs}", "docstring": "Computes ROUGE-L (summary level) of two text collections of sentences.\nhttp://research.microsoft.com/en-us/um/people/cyl/download/papers/\nrouge-working-note-v1.3.1.pdf\n\nCalculated according to:\nR_lcs = SUM(1, u)[LCS(r_i,C)]/m\nP_lcs = SUM(1, u)[LCS(r_i,C)]/n\nF_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n\nwhere:\nSUM(i,u) = SUM from i through u\nu = number of sentences in reference summary\nC = Candidate summary made up of v sentences\nm = number of words in reference summary\nn = number of words in candidate summary\n\nArgs:\n evaluated_sentences: The sentences that have been picked by the\n summarizer\n reference_sentence: One of the sentences in the reference summaries\n\nReturns:\n A float: F_lcs\n\nRaises:\n ValueError: raises exception if a param has len <= 0", "id": "f5065:m10"} {"signature": "def _union_lcs(evaluated_sentences, reference_sentence, prev_union=None):", "body": "if prev_union is None:prev_union = set()if len(evaluated_sentences) <= :raise ValueError(\"\")lcs_union = prev_unionprev_count = len(prev_union)reference_words = _split_into_words([reference_sentence])combined_lcs_length = for eval_s in evaluated_sentences:evaluated_words = _split_into_words([eval_s])lcs = set(_recon_lcs(reference_words, evaluated_words))combined_lcs_length += len(lcs)lcs_union = lcs_union.union(lcs)new_lcs_count = len(lcs_union) - prev_countreturn new_lcs_count, lcs_union", "docstring": "Returns LCS_u(r_i, C) which is the LCS score of the union longest common\nsubsequence between reference sentence ri and candidate summary C.\nFor example:\nif r_i= w1 w2 w3 w4 w5, and C contains two sentences: c1 = w1 w2 w6 w7 w8\nand c2 = w1 w3 w8 w9 w5, then the longest common subsequence of r_i and c1\nis \"w1 w2\" and the longest common subsequence of r_i and c2 is \"w1 w3 w5\".\nThe union longest common subsequence of r_i, c1, and c2 is \"w1 w2 w3 w5\"\nand LCS_u(r_i, C) = 
4/5.\n\nArgs:\n evaluated_sentences: The sentences that have been picked by the\n summarizer\n reference_sentence: One of the sentences in the reference summaries\n\nReturns:\n float: LCS_u(r_i, C)\n\nValueError:\n Raises exception if a param has len <= 0", "id": "f5065:m9"} {"signature": "def add_path(self, path):", "body": "if path not in self.paths:self.paths.append(path)", "docstring": "Adds a path to search through when attempting to look up a module.\n\n:param path: the path the add to the list of searchable paths", "id": "f5080:c2:m1"} {"signature": "def load_module(self, module_name):", "body": "if module_name != self.module_name:raise LoaderError('')if module_name in sys.modules:return sys.modules[module_name]module = self.load_module_py_path(module_name, self.load_target)if self.is_pkg:module.__path__ = [self.module_path]module.__package__ = module_nameelse:module.__package__ = module_name.rpartition('')[]sys.modules[module_name] = modulereturn module", "docstring": "Loads a module's code and sets the module's expected hidden\nvariables. For more information on these variables and what they\nare for, please see PEP302.\n\n:param module_name: the full name of the module to load", "id": "f5080:c1:m2"} {"signature": "def rlist_modules(mname):", "body": "module = import_module(mname)if not module:raise ImportError(''.format(mname))found = list()if _should_use_module_path(module):mpath = module.__path__[]else:mpaths = sys.pathmpath = _scan_paths_for(mname, mpaths)if mpath:for pmname in _search_for_modules(mpath, recursive=True):found_mod = MODULE_PATH_SEP.join((mname, pmname))found.append(found_mod)return found", "docstring": "Attempts to the submodules under a module recursively. This function\nworks for modules located in the default path as well as extended paths\nvia the sys.meta_path hooks.\n\nThis function carries the expectation that the hidden module variable\n'__path__' has been set correctly.\n\n:param mname: the module name to descend into", "id": "f5084:m8"} {"signature": "def rlist_classes(module, cls_filter=None):", "body": "found = list()mnames = rlist_modules(module)for mname in mnames:[found.append(c) for c in list_classes(mname, cls_filter)]return found", "docstring": "Attempts to list all of the classes within a given module namespace.\nThis method, unlike list_classes, will recurse into discovered\nsubmodules.\n\nIf a type filter is set, it will be called with each class as its\nparameter. This filter's return value must be interpretable as a\nboolean. Results that evaluate as True will include the type in the\nlist of returned classes. Results that evaluate as False will exclude\nthe type in the list of returned classes.\n\n:param mname: of the module to descend into\n:param cls_filter: a function to call to determine what classes should be\n included.", "id": "f5084:m10"} {"signature": "def rdiscover_modules(directory):", "body": "found = list()if os.path.isdir(directory):for entry in os.listdir(directory):next_dir = os.path.join(directory, entry)if os.path.isfile(os.path.join(next_dir, MODULE_INIT_FILE)):modules = _search_for_modules(next_dir, True, entry)found.extend(modules)return found", "docstring": "Attempts to list all of the modules and submodules found within a given\ndirectory tree. 
This function recursively searches the directory tree\nfor potential python modules and returns a list of candidate names.\n\n**Note:** This function returns a list of strings representing\ndiscovered module names, not the actual, loaded modules.\n\n:param directory: the directory to search for modules.", "id": "f5084:m6"} {"signature": "def mapk(actual, predicted, k=):", "body": "return np.mean([apk(a,p,k) for a,p in zip(actual, predicted)])", "docstring": "Computes the mean average precision at k.\n\nThis function computes the mean average precision at k between two lists\nof lists of items.\n\nParameters\n----------\nactual : list\n A list of lists of elements that are to be predicted \n (order doesn't matter in the lists)\npredicted : list\n A list of lists of predicted elements\n (order matters in the lists)\nk : int, optional\n The maximum number of predicted elements\n\nReturns\n-------\nscore : double\n The mean average precision at k over the input lists", "id": "f5091:m1"} {"signature": "def clear(self):", "body": "self.reg = np.zeros((self.m,), dtype=np.int8)", "docstring": "Reset the current HyperLogLog to empty.", "id": "f5103:c0:m8"} {"signature": "def is_empty(self):", "body": "if np.any(self.reg):return Falsereturn True", "docstring": "Returns:\n bool: True if the current HyperLogLog is empty - at the state of just\n initialized.", "id": "f5103:c0:m7"} {"signature": "def __eq__(self, other):", "body": "return type(self) is type(other) andself.seed == other.seed andnp.array_equal(self.hashvalues, other.hashvalues)", "docstring": "Returns:\n bool: If their seeds and hash values are both equal then the two\n are equivalent.", "id": "f5104:c0:m5"} {"signature": "def copy(self):", "body": "return WeightedMinHash(self.seed, self.digest())", "docstring": "Returns:\n datasketch.WeightedMinHash: A copy of this weighted MinHash by exporting \n its state.", "id": "f5104:c0:m3"} {"signature": "def jaccard(self, other):", "body": "if other.seed != self.seed:raise ValueError(\"\")if len(self) != len(other):raise ValueError(\"\")intersection = for this, that in zip(self.hashvalues, other.hashvalues):if np.array_equal(this, that):intersection += return float(intersection) / float(len(self))", "docstring": "Estimate the `weighted Jaccard similarity`_ between the\n multi-sets represented by this weighted MinHash and the other.\n\n Args:\n other (datasketch.WeightedMinHash): The other weighted MinHash.\n\n Returns:\n float: The weighted Jaccard similarity between 0.0 and 1.0.\n\n .. 
_`weighted Jaccard similarity`: http://mathoverflow.net/questions/123339/weighted-jaccard-similarity", "id": "f5104:c0:m1"} {"signature": "def remove(self, key):", "body": "if self.prepickle:key = pickle.dumps(key)if key not in self.keys:raise ValueError(\"\")for H, hashtable in zip(self.keys[key], self.hashtables):hashtable.remove_val(H, key)if not hashtable.get(H):hashtable.remove(H)self.keys.remove(key)", "docstring": "Remove the key from the index.\n\nArgs:\n key (hashable): The unique identifier of a set.", "id": "f5105:c0:m8"} {"signature": "def get_counts(self):", "body": "counts = [hashtable.itemcounts() for hashtable in self.hashtables]return counts", "docstring": "Returns a list of length ``self.b`` with elements representing the\nnumber of keys stored under each bucket for the given permutation.", "id": "f5105:c0:m12"} {"signature": "async def get_subset_counts(self, *keys):", "body": "key_set = list(set(keys))hashtables = [unordered_storage({'': ''}) for _ in range(self.b)]Hss = await self.keys.getmany(*key_set)for key, Hs in zip(key_set, Hss):for H, hashtable in zip(Hs, hashtables):hashtable.insert(H, key)return [hashtable.itemcounts() for hashtable in hashtables]", "docstring": "see :class:`datasketch.MinHashLSH`.", "id": "f5107:c0:m24"} {"signature": "async def remove(self, key):", "body": "await self._remove(key, buffer=False)", "docstring": "see :class:`datasketch.MinHashLSH`.", "id": "f5107:c0:m18"} {"signature": "def _compute_nfp_real(l, u, counts, sizes):", "body": "if l > u:raise ValueError(\"\")return np.sum((float(sizes[u])-sizes[l:u+])/float(sizes[u])*counts[l:u+])", "docstring": "Computes the expected number of false positives caused by using\n u to approximate set sizes in the interval [l, u], using the real\n set size distribution.\n\n Args:\n l: the lower bound on set sizes.\n u: the upper bound on set sizes.\n counts: the complete distribution of set sizes.\n sizes: the complete domain of set sizes.\n\n Return (float): the expected number of false positives.", "id": "f5110:m2"} {"signature": "def _compute_nfps_uniform(cum_counts, sizes):", "body": "nfps = np.zeros((len(sizes), len(sizes)))for l in range(len(sizes)):for u in range(l, len(sizes)):nfps[l, u] = _compute_nfp_uniform(l, u, cum_counts, sizes)return nfps", "docstring": "Computes the matrix of expected false positives for all possible\n sub-intervals of the complete domain of set sizes, assuming uniform\n distribution of set_sizes within each sub-intervals.\n\n Args:\n cum_counts: the complete cummulative distribution of set sizes.\n sizes: the complete domain of set sizes.\n\n Return (np.array): the 2-D array of expected number of false positives\n for every pair of [l, u] interval, where l is axis-0 and u is\n axis-1.", "id": "f5110:m1"} {"signature": "@abstractmethoddef remove(self, *keys):", "body": "pass", "docstring": "Remove `keys` from storage", "id": "f5111:c0:m9"} {"signature": "@abstractmethoddef has_key(self, key):", "body": "pass", "docstring": "Determines whether the key is in the storage or not", "id": "f5111:c0:m13"} {"signature": "def query(self, minhash, size):", "body": "for i, index in enumerate(self.indexes):u = self.uppers[i]if u is None:continueb, r = self._get_optimal_param(u, size)for key in index[r]._query_b(minhash, b):yield key", "docstring": "Giving the MinHash and size of the query set, retrieve\nkeys that references sets with containment with respect to\nthe query set greater than the threshold.\n\nArgs:\n minhash (datasketch.MinHash): The MinHash of the query set.\n size 
(int): The size (number of unique items) of the query set.\n\nReturns:\n `iterator` of keys.", "id": "f5112:c0:m4"} {"signature": "def update(self, b):", "body": "hv = self.hashfunc(b)a, b = self.permutationsphv = np.bitwise_and((a * hv + b) % _mersenne_prime, np.uint64(_max_hash))self.hashvalues = np.minimum(phv, self.hashvalues)", "docstring": "Update this MinHash with a new value.\n The value will be hashed using the hash function specified by\n the `hashfunc` argument in the constructor.\n\n Args:\n b: The value to be hashed using the hash function specified.\n\n Example:\n To update with a new string value (using the default SHA1 hash\n function, which requires bytes as input):\n\n .. code-block:: python\n\n minhash = Minhash()\n minhash.update(\"new value\".encode('utf-8'))\n\n We can also use a different hash function, for example, `pyfarmhash`:\n\n .. code-block:: python\n\n import farmhash\n def _hash_32(b):\n return farmhash.hash32(b)\n minhash = MinHash(hashfunc=_hash_32)\n minhash.update(\"new value\")", "id": "f5114:c0:m3"} {"signature": "def copy(self):", "body": "return MinHash(seed=self.seed, hashfunc=self.hashfunc,hashvalues=self.digest(),permutations=self.permutations)", "docstring": ":returns: datasketch.MinHash -- A copy of this MinHash by exporting its state.", "id": "f5114:c0:m10"} {"signature": "def digest(self):", "body": "return copy.copy(self.hashvalues)", "docstring": "Export the hash values, which is the internal state of the\n MinHash.\n\n Returns:\n numpy.array: The hash values which is a Numpy array.", "id": "f5114:c0:m7"} {"signature": "def __len__(self):", "body": "return len(self.hashvalues)", "docstring": ":returns: int -- The number of hash values.", "id": "f5114:c0:m11"} {"signature": "def bytesize(self, byteorder=''):", "body": "seed_size = struct.calcsize(byteorder+'')length_size = struct.calcsize(byteorder+'')hashvalue_size = struct.calcsize(byteorder+'')return seed_size + length_size + len(self) * hashvalue_size", "docstring": "Compute the byte size after serialization.\n\n Args:\n byteorder (str, optional): This is byte order of the serialized data. 
Use one\n of the `byte order characters\n `_:\n ``@``, ``=``, ``<``, ``>``, and ``!``.\n Default is ``@`` -- the native order.\n\n Returns:\n int: Size in number of bytes after serialization.", "id": "f5115:c0:m4"} {"signature": "def _calc_a(self, r, b):", "body": "if r == :return / ( << b)return r * ( - r) ** ( ** b - ) / ( - ( - r) ** ( * b))", "docstring": "Compute the function A(r, b)", "id": "f5116:c0:m6"} {"signature": "def jaccard(self, other):", "body": "if self.b != other.b:raise ValueError(\"\")if self.seed != other.seed:raise ValueError(\"\")intersection = np.count_nonzero(self.hashvalues==other.hashvalues)raw_est = float(intersection) / float(self.hashvalues.size)a1 = self._calc_a(self.r, self.b)a2 = self._calc_a(other.r, other.b)c1, c2 = self._calc_c(a1, a2, self.r, other.r)return (raw_est - c1) / ( - c2)", "docstring": "Estimate the Jaccard similarity (resemblance) between this b-bit\nMinHash and the other.", "id": "f5116:c0:m2"} {"signature": "def index(self):", "body": "for i, hashtable in enumerate(self.hashtables):self.sorted_hashtables[i] = [H for H in hashtable.keys()]self.sorted_hashtables[i].sort()", "docstring": "Index all the keys added so far and make them searchable.", "id": "f5117:c0:m2"} {"signature": "def unwrap_self_for_multiprocessing(arg):", "body": "(inst, method_name, args) = argreturn getattr(inst, method_name)(*args)", "docstring": "You can not call methods with multiprocessing, but free functions,\n If you want to call inst.method(arg0, arg1),\n\n unwrap_self_for_multiprocessing(inst, \"method\", (arg0, arg1))\n\n does the trick.", "id": "f5149:m1"} {"signature": "@cli.command()@click.option('', '', required=True, type=click.Path(exists=True), help='')@click.option('','', type=click.Path(exists=False), help='')@click.option('', required=True, type=click.Path(exists=True), help='')def backpropagate(infile, outfile, apply_scores):", "body": "if outfile is None:outfile = infileelse:outfile = outfilebackpropagate_oswr(infile, outfile, apply_scores)", "docstring": "Backpropagate multi-run peptide and protein scores to single files", "id": "f5154:m8"} {"signature": "@cli.command()@click.argument('', nargs=-, type=click.Path(exists=True))@click.option('', '', required=True, type=click.Path(exists=True), help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default=, show_default=True, type=float, help='')def filter(sqmassfiles, infile, max_precursor_pep, max_peakgroup_pep, max_transition_pep):", "body": "filter_sqmass(sqmassfiles, infile, max_precursor_pep, max_peakgroup_pep, max_transition_pep)", "docstring": "Filter sqMass files", "id": "f5154:m11"} {"signature": "@cli.command()@click.option('', '', required=True, type=click.Path(exists=True), help='')@click.option('','', type=click.Path(exists=False), help='')def reduce(infile, outfile):", "body": "if outfile is None:outfile = infileelse:outfile = outfilereduce_osw(infile, outfile)", "docstring": "Reduce scored PyProphet file to minimum for global scoring", "id": "f5154:m6"} {"signature": "@click.group(chain=True)@click.version_option()def cli():", "body": "", "docstring": "PyProphet: Semi-supervised learning and scoring of OpenSWATH results.\n\nVisit http://openswath.org for usage instructions and help.", "id": "f5154:m0"} {"signature": "@cli.command()@click.option('', '', required=True, type=click.Path(exists=True), help='')@click.option('', '', type=click.Path(exists=False), help='')@click.option('', 
default='', show_default=True, type=click.Choice(['', '', '','']), help='')@click.option('', '', default=False, show_default=True, help='')@click.option('', default=True, show_default=True, help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default='', show_default=True, type=click.Choice(['','','']), help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default=True, show_default=True, help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default=True, show_default=True, help='')@click.option('', default=, show_default=True, type=float, help='')def export(infile, outfile, format, outcsv, transition_quantification, max_transition_pep, ipf, ipf_max_peptidoform_pep, max_rs_peakgroup_qvalue, peptide, max_global_peptide_qvalue, protein, max_global_protein_qvalue):", "body": "if format == \"\":export_score_plots(infile)else:if outfile is None:if outcsv:outfile = infile.split(\"\")[] + \"\"else:outfile = infile.split(\"\")[] + \"\"else:outfile = outfileexport_tsv(infile, outfile, format, outcsv, transition_quantification, max_transition_pep, ipf, ipf_max_peptidoform_pep, max_rs_peakgroup_qvalue, peptide, max_global_peptide_qvalue, protein, max_global_protein_qvalue)", "docstring": "Export TSV/CSV tables", "id": "f5154:m9"} {"signature": "@cli.command()@click.argument('', nargs=-, type=click.Path(exists=True))@click.option('','', required=True, type=click.Path(exists=False), help='')@click.option('', default=False, help='')@click.option('','', required=True, type=click.Path(exists=False), help='')def merge(infiles, outfile, same_run, templatefile):", "body": "if len(infiles) < :raise click.ClickException(\"\")merge_osw(infiles, outfile, templatefile, same_run)", "docstring": "Merge multiple OSW files and (for large experiments, it is recommended to subsample first).", "id": "f5154:m7"} {"signature": "@cli.command()@click.option('', '', required=True, type=click.Path(exists=True), help='')@click.option('', '', type=click.Path(exists=False), help='')@click.option('', default=True, show_default=True, help='')@click.option('', default=True, show_default=True, help='')@click.option('', default=True, show_default=True, help='')@click.option('', default=False, show_default=True, help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default=, show_default=True, type=float, help='')@click.option('', default=, show_default=True, type=float, help='')def ipf(infile, outfile, ipf_ms1_scoring, ipf_ms2_scoring, ipf_h0, ipf_grouped_fdr, ipf_max_precursor_pep, ipf_max_peakgroup_pep, ipf_max_precursor_peakgroup_pep, ipf_max_transition_pep):", "body": "if outfile is None:outfile = infileelse:outfile = outfileinfer_peptidoforms(infile, outfile, ipf_ms1_scoring, ipf_ms2_scoring, ipf_h0, ipf_grouped_fdr, ipf_max_precursor_pep, ipf_max_peakgroup_pep, ipf_max_precursor_peakgroup_pep, ipf_max_transition_pep)", "docstring": "Infer peptidoforms after scoring of MS1, MS2 and transition-level data.", "id": "f5154:m2"} {"signature": "@profiledef lfdr(p_values, pi0, trunc = True, monotone = True, transf = \"\", adj = , eps = np.power(,-)):", "body": "p = np.array(p_values)lfdr_out = prm_na = np.isfinite(p)p = p[rm_na]if (min(p) < or max(p) > ):raise click.ClickException(\"\")elif (pi0 < or pi0 > ):raise click.ClickException(\"\")if (transf 
== \"\"):p = np.maximum(p, eps)p = np.minimum(p, -eps)x = scipy.stats.norm.ppf(p, loc=, scale=)bw = bw_nrd0(x)myd = KDEUnivariate(x)myd.fit(bw=adj*bw, gridsize = )splinefit = sp.interpolate.splrep(myd.support, myd.density)y = sp.interpolate.splev(x, splinefit)lfdr = pi0 * scipy.stats.norm.pdf(x) / yelif (transf == \"\"):x = np.log((p + eps) / ( - p + eps))bw = bw_nrd0(x)myd = KDEUnivariate(x)myd.fit(bw=adj*bw, gridsize = )splinefit = sp.interpolate.splrep(myd.support, myd.density)y = sp.interpolate.splev(x, splinefit)dx = np.exp(x) / np.power(( + np.exp(x)),)lfdr = (pi0 * dx) / yelse:raise click.ClickException(\"\")if (trunc):lfdr[lfdr > ] = if (monotone):lfdr = lfdr[p.ravel().argsort()]for i in range(,len(x)):if (lfdr[i] < lfdr[i - ]):lfdr[i] = lfdr[i - ]lfdr = lfdr[scipy.stats.rankdata(p,\"\")-]lfdr_out[rm_na] = lfdrreturn lfdr_out", "docstring": "Estimate local FDR / posterior error probability from p-values according to bioconductor/qvalue", "id": "f5157:m11"} {"signature": "@profiledef error_statistics(target_scores, decoy_scores, parametric, pfdr, pi0_lambda, pi0_method = \"\", pi0_smooth_df = , pi0_smooth_log_pi0 = False, compute_lfdr = False, lfdr_trunc = True, lfdr_monotone = True, lfdr_transf = \"\", lfdr_adj = , lfdr_eps = np.power(,-)):", "body": "target_scores = to_one_dim_array(target_scores)target_scores = np.sort(target_scores[~np.isnan(target_scores)])decoy_scores = to_one_dim_array(decoy_scores)decoy_scores = np.sort(decoy_scores[~np.isnan(decoy_scores)])if parametric:target_pvalues = pnorm(target_scores, decoy_scores)else:target_pvalues = pemp(target_scores, decoy_scores)pi0 = pi0est(target_pvalues, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0)target_qvalues = qvalue(target_pvalues, pi0[''], pfdr)metrics = stat_metrics(target_pvalues, pi0[''], pfdr)error_stat = pd.DataFrame({'': target_scores, '': target_pvalues, '': target_qvalues, '': metrics[''], '': metrics[''], '': metrics[''], '': metrics[''], '': metrics[''], '': metrics[''], '': metrics[''], '': metrics['']})if compute_lfdr:error_stat[''] = lfdr(target_pvalues, pi0[''], lfdr_trunc, lfdr_monotone, lfdr_transf, lfdr_adj, lfdr_eps)return error_stat, pi0", "docstring": "Takes list of decoy and target scores and creates error statistics for target values", "id": "f5157:m15"} {"signature": "def pnorm(stat, stat0):", "body": "mu, sigma = mean_and_std_dev(stat0)stat = to_one_dim_array(stat, np.float64)args = (stat - mu) / sigmareturn -( * ( + scipy.special.erf(args / np.sqrt())))", "docstring": "[P(X>pi, mu, sigma) for pi in pvalues] for normal distributed stat with\n expectation value mu and std deviation sigma", "id": "f5157:m6"} {"signature": "@profiledef summary_err_table(df, qvalues=[, , , , , , , , ]):", "body": "qvalues = to_one_dim_array(qvalues)ix = find_nearest_matches(np.float32(df.qvalue.values), qvalues)df_sub = df.iloc[ix].copy()for i_sub, (i0, i1) in enumerate(zip(ix, ix[:])):if i1 == i0:df_sub.iloc[i_sub + , :] = Nonedf_sub.qvalue = qvaluesdf_sub.reset_index(inplace=True, drop=True)return df_sub[['','','','','','','','','','','','']]", "docstring": "Summary error table for some typical q-values", "id": "f5157:m14"} {"signature": "def find_cutoff(tt_scores, td_scores, cutoff_fdr, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0):", "body": "error_stat, pi0 = error_statistics(tt_scores, td_scores, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, False)if not len(error_stat):raise click.ClickException(\"\")i0 = (error_stat.qvalue - 
cutoff_fdr).abs().idxmin()cutoff = error_stat.iloc[i0][\"\"]return cutoff", "docstring": "Finds cut off target score for specified false discovery rate fdr", "id": "f5157:m16"} {"signature": "@register.filterdef obfuscate(value, juice=None):", "body": "if not settings.UNFRIENDLY_ENABLE_FILTER:return valuekwargs = {'': encrypt(value,settings.UNFRIENDLY_SECRET,settings.UNFRIENDLY_IV,checksum=settings.UNFRIENDLY_ENFORCE_CHECKSUM),}if juice:kwargs[''] = slugify(juice)return reverse('', kwargs=kwargs)", "docstring": "Template filter that obfuscates whatever text it is applied to. The text is\nsupposed to be a URL, but it will obfuscate anything.\n\nUsage:\n Extremely unfriendly URL:\n {{ \"/my-secret-path/\"|obfuscate }}\n\n Include some SEO juice:\n {{ \"/my-secret-path/\"|obfuscate:\"some SEO friendly text\" }}", "id": "f5169:m0"} {"signature": "def decrypt(ciphertext, secret, inital_vector, checksum=True, lazy=True):", "body": "secret = _lazysecret(secret) if lazy else secretencobj = AES.new(secret, AES.MODE_CFB, inital_vector)try:padded = ciphertext + ('' * (len(ciphertext) % ))decoded = base64.urlsafe_b64decode(str(padded))plaintext = encobj.decrypt(decoded)except (TypeError, binascii.Error):raise InvalidKeyError(\"\")if checksum:try:crc, plaintext = (base64.urlsafe_b64decode(plaintext[-:]), plaintext[:-])except (TypeError, binascii.Error):raise CheckSumError(\"\")if not crc == _pack_crc(plaintext):raise CheckSumError(\"\")return plaintext", "docstring": "Decrypts ciphertext with secret\n ciphertext - encrypted content to decrypt\n secret - secret to decrypt ciphertext\n inital_vector - initial vector\n lazy - pad secret if less than legal blocksize (default: True)\n checksum - verify crc32 byte encoded checksum (default: True)\n returns plaintext", "id": "f5174:m4"} {"signature": "def _crc(plaintext):", "body": "if not isinstance(plaintext, six.binary_type):plaintext = six.b(plaintext)return (zlib.crc32(plaintext) % ) & ", "docstring": "Generates crc32. 
Modulo keep the value within int range.", "id": "f5174:m1"} {"signature": "@njit()def _cluster_hits(hits, clusters, assigned_hit_array, cluster_hit_indices, column_cluster_distance, row_cluster_distance, frame_cluster_distance, min_hit_charge, max_hit_charge, ignore_same_hits, noisy_pixels, disabled_pixels):", "body": "total_hits = hits.shape[]if total_hits == :return max_cluster_hits = cluster_hit_indices.shape[]if total_hits != clusters.shape[]:raise ValueError(\"\")if total_hits != assigned_hit_array.shape[]:raise ValueError(\"\")if min_hit_charge == :charge_correction = else:charge_correction = start_event_hit_index = start_event_cluster_index = cluster_size = event_number = hits[]['']event_cluster_index = for i in range(total_hits):if _new_event(hits[i][''], event_number):_finish_event(hits=hits,clusters=clusters,start_event_hit_index=start_event_hit_index,stop_event_hit_index=i,start_event_cluster_index=start_event_cluster_index,stop_event_cluster_index=start_event_cluster_index + event_cluster_index)start_event_hit_index = istart_event_cluster_index = start_event_cluster_index + event_cluster_indexevent_number = hits[i]['']event_cluster_index = if assigned_hit_array[i] > : continueif not _hit_ok(hit=hits[i],min_hit_charge=min_hit_charge,max_hit_charge=max_hit_charge) or (disabled_pixels.shape[] != and _pixel_masked(hits[i], disabled_pixels)):_set_hit_invalid(hit=hits[i], cluster_id=-)assigned_hit_array[i] = continue_set_1d_array(cluster_hit_indices, -, cluster_size)cluster_hit_indices[] = iassigned_hit_array[i] = cluster_size = for j in cluster_hit_indices: if j < : breakfor k in range(cluster_hit_indices[] + , total_hits):if _new_event(hits[k][''], event_number):breakif assigned_hit_array[k] > :continueif not _hit_ok(hit=hits[k],min_hit_charge=min_hit_charge,max_hit_charge=max_hit_charge) or (disabled_pixels.shape[] != and _pixel_masked(hits[k], disabled_pixels)):_set_hit_invalid(hit=hits[k], cluster_id=-)assigned_hit_array[k] = continueif _is_in_max_difference(hits[j][''], hits[k][''], column_cluster_distance) and _is_in_max_difference(hits[j][''], hits[k][''], row_cluster_distance) and _is_in_max_difference(hits[j][''], hits[k][''], frame_cluster_distance):if not ignore_same_hits or hits[j][''] != hits[k][''] or hits[j][''] != hits[k]['']:cluster_size += if cluster_size > max_cluster_hits:raise IndexError('')cluster_hit_indices[cluster_size - ] = kassigned_hit_array[k] = else:_set_hit_invalid(hit=hits[k], cluster_id=-)assigned_hit_array[k] = if cluster_size == and noisy_pixels.shape[] != and _pixel_masked(hits[cluster_hit_indices[]], noisy_pixels):_set_hit_invalid(hit=hits[cluster_hit_indices[]], cluster_id=-)else:_finish_cluster(hits=hits,clusters=clusters,cluster_size=cluster_size,cluster_hit_indices=cluster_hit_indices,cluster_index=start_event_cluster_index + event_cluster_index,cluster_id=event_cluster_index,charge_correction=charge_correction,noisy_pixels=noisy_pixels,disabled_pixels=disabled_pixels)event_cluster_index += _finish_event(hits=hits,clusters=clusters,start_event_hit_index=start_event_hit_index,stop_event_hit_index=total_hits,start_event_cluster_index=start_event_cluster_index,stop_event_cluster_index=start_event_cluster_index + event_cluster_index)total_clusters = start_event_cluster_index + event_cluster_indexreturn total_clusters", "docstring": "Main precompiled function that loopes over the hits and clusters them", "id": "f5184:m8"} {"signature": "@njit()def _is_in_max_difference(value_1, value_2, max_difference):", "body": "if value_1 <= value_2:return 
value_2 - value_1 <= max_differencereturn value_1 - value_2 <= max_difference", "docstring": "Helper function to determine the difference of two values that can be np.uints. Works in Python and numba mode.\n Circumvents numba bug #1653", "id": "f5184:m7"} {"signature": "def set_hit_dtype(self, hit_dtype):", "body": "if not hit_dtype:hit_dtype = np.dtype([])else:hit_dtype = np.dtype(hit_dtype)cluster_hits_descr = hit_dtype.descrfor dtype_name, dtype in self._default_cluster_hits_descr:if self._hit_fields_mapping[dtype_name] not in hit_dtype.fields:cluster_hits_descr.append((dtype_name, dtype))self._cluster_hits_descr = cluster_hits_descrself._init_arrays(size=)", "docstring": "Set the data type of the hits.\n\n Fields that are not mentioned here are NOT copied into the clustered hits array.\n Clusterizer has to know the hit data type to produce the clustered hit result with the same data types.\n\n Parameters:\n -----------\n hit_dtype : numpy.dtype or equivalent\n Defines the dtype of the hit array.\n\n Example:\n --------\n hit_dtype = [(\"column\", np.uint16), (\"row\", np.uint16)], where\n \"column\", \"row\" is the field name of the input hit array.", "id": "f5185:c0:m6"} {"signature": "def set_end_of_cluster_function(self, function):", "body": "self.cluster_functions._end_of_cluster_function = self._jitted(function)self._end_of_cluster_function = function", "docstring": "Adds the function to the module.\n This is maybe the only way to make the clusterizer work with multiprocessing.", "id": "f5185:c0:m9"} {"signature": "def ignore_same_hits(self, value):", "body": "self._ignore_same_hits = value", "docstring": "Whether a duplicate hit in the event with the same column and row is ignored or not.", "id": "f5185:c0:m16"} {"signature": "def threshold_img(data, threshold, mask=None, mask_out=''):", "body": "if mask is not None:mask = threshold_img(mask, threshold, mask_out=mask_out)return data * mask.astype(bool)if mask_out.startswith(''):data[data < threshold] = elif mask_out.startswith(''):data[data > threshold] = return data", "docstring": "Threshold data, setting all values in the array above/below threshold\n to zero.\n Args:\n data (ndarray): The image data to threshold.\n threshold (float): Numeric threshold to apply to image.\n mask (ndarray): Optional 1D-array with the same length as the data. If\n passed, the threshold is first applied to the mask, and the\n resulting indices are used to threshold the data. This is primarily\n useful when, e.g., applying a statistical threshold to a z-value\n image based on a p-value threshold.\n mask_out (str): Thresholding direction. Can be 'below' the threshold\n (default) or 'above' the threshold. Note: use 'above' when masking\n based on p values.", "id": "f5201:m4"} {"signature": "def get_sphere(coords, r=, vox_dims=(, , ), dims=(, , )):", "body": "r = float(r)xx, yy, zz = [slice(-r / vox_dims[i], r / vox_dims[i] + , ) for i in range(len(coords))]cube = np.vstack([row.ravel() for row in np.mgrid[xx, yy, zz]])sphere = cube[:, np.sum(np.dot(np.diag(vox_dims), cube) ** , ) ** <= r]sphere = np.round(sphere.T + coords)return sphere[(np.min(sphere, ) >= ) &(np.max(np.subtract(sphere, dims), ) <= -), :].astype(int)", "docstring": "Return all points within r mm of coordinates. Generates a cube\n and then discards all points outside the sphere. 
Only returns values that\n fall within the dimensions of the image.", "id": "f5201:m0"} {"signature": "def add(self, layers, above=None, below=None):", "body": "def add_named_layer(name, image):image = self.get_image(image, output='')if above is not None:image[image < above] = if below is not None:image[image > below] = self.layers[name] = imageself.stack.append(name)if isinstance(layers, dict):for (name, image) in layers.items():add_named_layer(name, image)else:if not isinstance(layers, list):layers = [layers]for image in layers:name = '' % len(self.stack)add_named_layer(name, image)self.set_mask()", "docstring": "Add one or more layers to the stack of masking layers.\n Args:\n layers: A string, NiBabel image, list, or dict. If anything other\n than a dict is passed, assigns sequential layer names based on\n the current position in stack; if a dict, uses key as the name\n and value as the mask image.", "id": "f5202:c0:m2"} {"signature": "def reset(self):", "body": "self.layers = {}self.stack = []self.set_mask()self.n_vox_in_vol = len(np.where(self.current_mask)[])", "docstring": "Reset/remove all layers, keeping only the initial volume.", "id": "f5202:c0:m1"} {"signature": "def get_image(self, image, output=''):", "body": "if isinstance(image, string_types):image = nb.load(image)if type(image).__module__.startswith(''):if output == '':return imageimage = image.get_data()if not type(image).__module__.startswith(''):raise ValueError(\"\"\"\")if image.shape[:] == self.volume.shape:if output == '':return nb.nifti1.Nifti1Image(image, None, self.get_header())elif output == '':return imageelse:image = image.ravel()if output == '':return image.ravel()image = np.reshape(image, self.volume.shape)if output == '':return imagereturn nb.nifti1.Nifti1Image(image, None, self.get_header())", "docstring": "A flexible method for transforming between different\n representations of image data.\n Args:\n image: The input image. Can be a string (filename of image),\n NiBabel image, N-dimensional array (must have same shape as\n self.volume), or vectorized image data (must have same length\n as current conjunction mask).\n output: The format of the returned image representation. Must be\n one of:\n 'vector': A 1D vectorized array\n 'array': An N-dimensional array, with\n shape = self.volume.shape\n 'image': A NiBabel image\n Returns: An object containing image data; see output options above.", "id": "f5202:c0:m4"} {"signature": "def mask(self, image, nan_to_num=True, layers=None, in_global_mask=False):", "body": "self.set_mask(layers)image = self.get_image(image, output='')if in_global_mask:masked_data = image[self.global_mask]masked_data[~self.get_mask(in_global_mask=True)] = else:masked_data = image[self.current_mask]if nan_to_num:masked_data = np.nan_to_num(masked_data)return masked_data", "docstring": "Vectorize an image and mask out all invalid voxels.\n\n Args:\n images: The image to vectorize and mask. Input can be any object\n handled by get_image().\n layers: Which mask layers to use (specified as int, string, or\n list of ints and strings). When None, applies the conjunction\n of all layers.\n nan_to_num: boolean indicating whether to convert NaNs to 0.\n in_global_mask: Whether to return the resulting masked vector in\n the globally masked space (i.e., n_voxels =\n len(self.global_mask)). 
If False (default), returns in the full\n image space (i.e., n_voxels = len(self.volume)).\n Returns:\n A 1D NumPy array of in-mask voxels.", "id": "f5202:c0:m5"} {"signature": "def __init__(self, dataset):", "body": "self.dataset = datasetself.data = pd.DataFrame()", "docstring": "Initialize a new FeatureTable. Takes as input a parent DataSet\n instance and feature data (if provided).", "id": "f5203:c2:m0"} {"signature": "def get_feature_counts(self, threshold=):", "body": "counts = np.sum(self.get_feature_data() >= threshold, )return dict(zip(self.get_feature_names(), list(counts)))", "docstring": "Returns a dictionary, where the keys are the feature names\n and the values are the number of studies tagged with the feature.", "id": "f5203:c0:m8"} {"signature": "def search_features(self, search):", "body": "if isinstance(search, string_types):search = [search]search = [s.replace('', '') for s in search]cols = list(self.data.columns)results = []for s in search:results.extend([f for f in cols if re.match(s + '', f)])return list(set(results))", "docstring": "Returns all features that match any of the elements in the input\n list.\n\n Args:\n search (str, list): A string or list of strings defining the query.\n\n Returns:\n A list of matching feature names.", "id": "f5203:c2:m6"} {"signature": "def _sdf_to_csr(self):", "body": "data = self.data.to_dense()self.data = {'': list(data.columns),'': list(data.index),'': sparse.csr_matrix(data.values)}", "docstring": "Convert FeatureTable to SciPy CSR matrix.", "id": "f5203:c2:m9"} {"signature": "def add_features(self, features, append=True, merge='',duplicates='', min_studies=, threshold=):", "body": "if (not append) or not hasattr(self, ''):self.feature_table = FeatureTable(self)self.feature_table.add_features(features, merge=merge,duplicates=duplicates,min_studies=min_studies,threshold=threshold)", "docstring": "Construct a new FeatureTable from file.\n\n Args:\n features: Feature data to add. Can be:\n (a) A text file containing the feature data, where each row is\n a study in the database, with features in columns. The first\n column must contain the IDs of the studies to match up with the\n image data.\n (b) A pandas DataFrame, where studies are in rows, features are\n in columns, and the index provides the study IDs.\n append (bool): If True, adds new features to existing ones\n incrementally. If False, replaces old features.\n merge, duplicates, min_studies, threshold: Additional arguments\n passed to FeatureTable.add_features().", "id": "f5203:c0:m4"} {"signature": "def create_image_table(self, r=None):", "body": "logger.info(\"\")if r is not None:self.r = rself.image_table = ImageTable(self)", "docstring": "Create and store a new ImageTable instance based on the current\n Dataset. Will generally be called privately, but may be useful as a\n convenience method in cases where the user wants to re-generate the\n table with a new smoothing kernel of different radius.\n\n Args:\n r (int): An optional integer indicating the radius of the smoothing\n kernel. 
By default, this is None, which will keep whatever\n value is currently set in the Dataset instance.", "id": "f5203:c0:m2"} {"signature": "def get_image_data(self, ids=None, voxels=None, dense=True):", "body": "if dense and ids is None and voxels is None:logger.warning(\"\"\"\"\"\"\"\"\"\"\"\")result = self.dataif ids is not None:idxs = np.where(np.in1d(np.array(self.ids), np.array(ids)))[]result = result[:, idxs]if voxels is not None:result = result[voxels, :]return result.toarray() if dense else result", "docstring": "Slices and returns a subset of image data.\n\n Args:\n ids (list, array): A list or 1D numpy array of study ids to\n return. If None, returns data for all studies.\n voxels (list, array): A list or 1D numpy array of voxel indices\n (i.e., rows) to return. If None, returns data for all voxels.\n dense (bool): Optional boolean. When True (default), convert the\n result to a dense array before returning. When False, keep as\n sparse matrix.\n\n Returns:\n A 2D numpy array with voxels in rows and studies in columns.", "id": "f5203:c1:m1"} {"signature": "def get_feature_data(self, ids=None, features=None, dense=True):", "body": "result = self.dataif ids is not None:result = result.ix[ids]if features is not None:result = result.ix[:, features]return result.to_dense() if dense else result", "docstring": "Slices and returns a subset of feature data.\n\n Args:\n ids (list, array): A list or 1D numpy array of study ids to\n return rows for. If None, returns data for all studies\n (i.e., all rows in array).\n features (list, array): A list or 1D numpy array of named features\n to return. If None, returns data for all features (i.e., all\n columns in array).\n dense (bool): Optional boolean. When True (default), convert the\n result to a dense array before returning. When False, keep as\n sparse matrix. Note that if ids is not None, the returned array\n will always be dense.\n Returns:\n A pandas DataFrame with study IDs in rows and features incolumns.", "id": "f5203:c2:m3"} {"signature": "def save_images_to_file(self, ids, outroot=''):", "body": "pass", "docstring": "Reconstructs vectorized images corresponding to the specified\n study ids and saves them to file, prepending with the outroot\n (default: current directory).", "id": "f5203:c1:m3"} {"signature": "def _load_activations(self, filename):", "body": "logger.info(\"\" % filename)activations = pd.read_csv(filename, sep='')activations.columns = [col.lower()for col in list(activations.columns)]mc = ['', '', '', '', '']if (set(mc) - set(list(activations.columns))):logger.error(\"\"\"\")returnspaces = activations[''].unique()xyz = activations[['', '', '']].valuesfor s in spaces:if s != self.transformer.target:inds = activations[''] == sxyz[inds] = self.transformer.apply(s, xyz[inds])activations[['', '', '']] = xyzijk = pd.DataFrame(transformations.xyz_to_mat(xyz), columns=['', '', ''])activations = pd.concat([activations, ijk], axis=)return activations", "docstring": "Load activation data from a text file.\n\n Args:\n filename (str): a string pointing to the location of the txt file\n to read from.", "id": "f5203:c0:m1"} {"signature": "def t88_to_mni():", "body": "return np.array([[, , -, -],[-, , -, -],[, , , ],[, , , ]]).T", "docstring": "Convert Talairach to MNI coordinates using the Lancaster transform.\n Adapted from BrainMap scripts; see http://brainmap.org/icbm2tal/\n Details are described in Lancaster et al. 
(2007)\n (http://brainmap.org/new/pubs/LancasterHBM07.pdf).", "id": "f5205:m3"} {"signature": "def mat_to_xyz(foci, mat_dims=None, xyz_dims=None):", "body": "foci = np.hstack((foci, np.ones((foci.shape[], ))))mat = np.array([[-, , , ], [, , , -], [, , , -]]).Tresult = np.dot(foci, mat)[:, ::-] return np.round_(result).astype(int)", "docstring": "Convert an N x 3 array of matrix indices to XYZ coordinates.", "id": "f5205:m2"} {"signature": "def classify_regions(dataset, masks, method='', threshold=,remove_overlap=True, regularization='',output='', studies=None, features=None,class_weight='', classifier=None,cross_val='', param_grid=None, scoring=''):", "body": "(X, y) = get_studies_by_regions(dataset, masks, threshold, remove_overlap,studies, features,regularization=regularization)return classify(X, y, method, classifier, output, cross_val,class_weight, scoring=scoring, param_grid=param_grid)", "docstring": "Perform classification on specified regions\n\n Given a set of masks, this function retrieves studies associated with\n each mask at the specified threshold, optionally removes overlap and\n filters by studies and features. Then it trains an algorithm to\n classify studies based on features and tests performance.\n\n Args:\n dataset: a Neurosynth dataset\n maks: a list of paths to Nifti masks\n method: a string indicating which method to used.\n 'SVM': Support Vector Classifier with rbf kernel\n 'ERF': Extremely Randomized Forest classifier\n 'Dummy': A dummy classifier using stratified classes as\n predictor\n threshold: percentage of voxels active within the mask for study\n to be included\n remove_overlap: A boolean indicating if studies studies that\n appear in more than one mask should be excluded\n regularization: A string indicating type of regularization to use.\n If None, performs no regularization.\n 'scale': Unit scale without demeaning\n output: A string indicating output type\n 'summary': Dictionary with summary statistics including score\n and n\n 'summary_clf': Same as above but also includes classifier\n 'clf': Only returns classifier\n Warning: using cv without grid will return an untrained\n classifier\n studies: An optional list of study names used to constrain the set\n used in classification. If None, will use all features in the\n dataset.\n features: An optional list of feature names used to constrain the\n set used in classification. If None, will use all features in\n the dataset.\n class_weight: Parameter to pass to classifier determining how to\n weight classes\n classifier: An optional sci-kit learn classifier to use instead of\n pre-set up classifiers set up using 'method'\n cross_val: A string indicating type of cross validation to use.\n Can also pass a scikit_classifier\n param_grid: A dictionary indicating which parameters to optimize\n using GridSearchCV. 
If None, no GridSearch will be used\n\n Returns:\n A tuple (X, y) of np arrays.\n X is a feature by studies matrix and y is a vector of class labels", "id": "f5208:m6"} {"signature": "def get_feature_order(dataset, features):", "body": "all_features = dataset.get_feature_names()i = [all_features.index(f) for f in features]return i", "docstring": "Returns a list with the order that features requested appear in\n dataset", "id": "f5208:m5"} {"signature": "def _pearson_correlation(self, imgs_to_decode):", "body": "x, y = imgs_to_decode.astype(float), self.feature_images.astype(float)return self._xy_corr(x, y)", "docstring": "Decode images using Pearson's r.\n\n Computes the correlation between each input image and each feature\n image across voxels.\n\n Args:\n imgs_to_decode: An ndarray of images to decode, with voxels in rows\n and images in columns.\n\n Returns:\n An n_features x n_images 2D array, with each cell representing the\n pearson correlation between the i'th feature and the j'th image\n across all voxels.", "id": "f5212:c0:m8"} {"signature": "def load_features(self, features, image_type=None, from_array=False,threshold=):", "body": "if from_array:if isinstance(features, list):features = features[]self._load_features_from_array(features)elif path.exists(features[]):self._load_features_from_images(features)else:self._load_features_from_dataset(features, image_type=image_type, threshold=threshold)", "docstring": "Load features from current Dataset instance or a list of files.\n Args:\n features: List containing paths to, or names of, features to\n extract. Each element in the list must be a string containing\n either a path to an image, or the name of a feature (as named\n in the current Dataset). Mixing of paths and feature names\n within the list is not allowed.\n image_type: Optional suffix indicating which kind of image to use\n for analysis. Only used if features are taken from the Dataset;\n if features is a list of filenames, image_type is ignored.\n from_array: If True, the features argument is interpreted as a\n string pointing to the location of a 2D ndarray on disk\n containing feature data, where rows are voxels and columns are\n individual features.\n threshold: If features are taken from the dataset, this is the\n threshold passed to the meta-analysis module to generate fresh\n images.", "id": "f5212:c0:m3"} {"signature": "def decode(self, images, save=None, round=, names=None, **kwargs):", "body": "if isinstance(images, string_types):images = [images]if isinstance(images, list):imgs_to_decode = imageutils.load_imgs(images, self.masker)else:imgs_to_decode = imagesmethods = {'': self._pearson_correlation,'': self._dot_product,'': self._roi_association}result = np.around(methods[self.method](imgs_to_decode, **kwargs), round)if names is None:if type(images).__module__ == np.__name__:names = ['' % i for i in range(images.shape[])]elif self.method == '':names = ['' % i for i in range(result.shape[])]else:names = imagesresult = pd.DataFrame(result, columns=names, index=self.feature_names)if save is not None:result.to_csv(save, index_label='')return result", "docstring": "Decodes a set of images.\n\n Args:\n images: The images to decode. Can be:\n - A single String specifying the filename of the image to decode\n - A list of filenames\n - A single NumPy array containing the image data\n save: Optional filename to save results to. If None (default), returns\n all results as an array.\n round: Optional integer indicating number of decimals to round result\n to. 
Defaults to 4.\n names: Optional list of names corresponding to the images in filenames.\n If passed, must be of same length and in same order as filenames.\n By default, the columns in the output will be named using the image\n filenames.\n\n Returns:\n An n_features x n_files numpy array, where each feature is a row and\n each image is a column. The meaning of the values depends on the\n decoding method used.", "id": "f5212:c0:m1"} {"signature": "def _load_features_from_dataset(self, features=None, image_type=None,threshold=):", "body": "self.feature_names = self.dataset.feature_table.feature_namesif features is not None:self.feature_names = [f for f in featuresif f in self.feature_names]from neurosynth.analysis import metaself.feature_images = meta.analyze_features(self.dataset, self.feature_names, image_type=image_type,threshold=threshold)if self.masker.layers:in_mask = self.masker.get_mask(in_global_mask=True)self.feature_images = self.feature_images[in_mask, :]", "docstring": "Load feature image data from the current Dataset instance. See\n load_features() for documentation.", "id": "f5212:c0:m5"} {"signature": "def two_way(cells):", "body": "warnings.simplefilter(\"\", RuntimeWarning)cells = cells.astype('') total = np.apply_over_axes(np.sum, cells, [, ]).ravel()chi_sq = np.zeros(cells.shape, dtype='')for i in range():for j in range():exp = np.sum(cells[:, i, :], ).ravel() *np.sum(cells[:, :, j], ).ravel() / totalbad_vox = np.where(exp == )[]chi_sq[:, i, j] = (cells[:, i, j] - exp) ** / expchi_sq[bad_vox, i, j] = chi_sq = np.apply_over_axes(np.sum, chi_sq, [, ]).ravel()return special.chdtrc(, chi_sq)", "docstring": "Two-way chi-square test of independence.\n Takes a 3D array as input: N(voxels) x 2 x 2, where the last two dimensions\n are the contingency table for each of N voxels. Returns an array of\n p-values.", "id": "f5213:m1"} {"signature": "def account_info(self):", "body": "return self._get('')", "docstring": "Requests everything account related (total used storage, reward, ...).\n\n Returns:\n dict: dictionary containing account related info. 
::\n\n {\n \"extid\": \"extuserid\",\n \"email\": \"jeff@openload.io\",\n \"signup_at\": \"2015-01-09 23:59:54\",\n \"storage_left\": -1,\n \"storage_used\": \"32922117680\",\n \"traffic\": {\n \"left\": -1,\n \"used_24h\": 0\n },\n \"balance\": 0\n }", "id": "f5237:c0:m4"} {"signature": "def _get(self, url, params=None):", "body": "if not params:params = {}params.update({'': self.login, '': self.key})response_json = requests.get(self.api_url + url, params).json()return self._process_response(response_json)", "docstring": "Used by every other method, it makes a GET request with the given params.\n\n Args:\n url (str): relative path of a specific service (account_info, ...).\n params (:obj:`dict`, optional): contains parameters to be sent in the GET request.\n\n Returns:\n dict: results of the response of the GET request.", "id": "f5237:c0:m3"} {"signature": "def get_download_link(self, file_id, ticket, captcha_response=None):", "body": "params = {'': ticket, '': file_id}if captcha_response:params[''] = captcha_responsereturn self._get('', params)", "docstring": "Requests direct download link for requested file,\n this method makes use of the response of prepare_download, prepare_download must be called first.\n\n Args:\n file_id (str): id of the file to be downloaded.\n\n ticket (str): preparation ticket is found in prepare_download response,\\\n this is why we need to call prepare_download before get_download_link.\n\n captcha_response (:obj:`str`, optional): sometimes prepare_download will have captcha url to be solved, \\\n first, this is the solution of the captcha.\n\n Returns:\n dict: dictionary containing (file info, download url, ...). ::\n\n {\n \"name\": \"The quick brown fox.txt\",\n \"size\": 12345,\n \"sha1\": \"2fd4e1c67a2d28fced849ee1bb76e7391b93eb12\",\n \"content_type\": \"plain/text\",\n \"upload_at\": \"2011-01-26 13:33:37\",\n \"url\": \"https://abvzps.example.com/dl/l/4spxX_-cSO4/The+quick+brown+fox.txt\",\n \"token\": \"4spxX_-cSO4\"\n }", "id": "f5237:c0:m6"} {"signature": "@classmethoddef _check_status(cls, response_json):", "body": "status = response_json['']msg = response_json['']if status == :raise BadRequestException(msg)elif status == :raise PermissionDeniedException(msg)elif status == :raise FileNotFoundException(msg)elif status == :raise UnavailableForLegalReasonsException(msg)elif status == :raise BandwidthUsageExceeded(msg)elif status >= :raise ServerErrorException(msg)", "docstring": "Check the status of the incoming response, raise exception if status is not 200.\n\n Args:\n response_json (dict): results of the response of the GET request.\n\n Returns:\n None", "id": "f5237:c0:m1"} {"signature": "def rename_file(self, file_id, name):", "body": "return self._get('', params={'': file_id, '': name})", "docstring": "Sets a new name for a file\n\n Args:\n file_id (str): id of the file to be renamed.\n name (str): new name for the provided file.\n\n Returns:\n bool: True if file is renamed, otherwise False.", "id": "f5237:c0:m14"} {"signature": "def file_info(self, file_id):", "body": "return self._get('', params={'': file_id})", "docstring": "Used to request info for a specific file, info like size, name, .....\n\n Args:\n file_id (str): File-ID(s), single file or comma-separated (max. 50)\n\n Returns:\n dict: dictionary containing file(s) info, each key represents a file_id. 
::\n\n {\n \"72fA-_Lq8Ak3\": {\n \"id\": \"72fA-_Lq8Ak3\",\n \"status\": 200,\n \"name\": \"The quick brown fox.txt\",\n \"size\": 123456789012,\n \"sha1\": \"2fd4e1c67a2d28fced849ee1bb76e7391b93eb12\",\n \"content_type\": \"plain/text\",\n },\n \"72fA-_Lq8Ak4\": {\n \"id\": \"72fA-_Lq8Ak4\",\n \"status\": 500,\n \"name\": \"The quick brown fox.txt\",\n \"size\": false,\n \"sha1\": \"2fd4e1c67a2d28fced849ee1bb76e7391b93eb12\",\n \"content_type\": \"plain/text\",\n },\n ...\n }", "id": "f5237:c0:m7"} {"signature": "def delete_file(self, file_id):", "body": "return self._get('', params={'': file_id})", "docstring": "Removes one of your files\n\n Args:\n file_id (str): id of the file to be deleted.\n\n Returns:\n bool: True if file is deleted, otherwise False.", "id": "f5237:c0:m15"} {"signature": "def splash_image(self, file_id):", "body": "return self._get('', params={'': file_id})", "docstring": "Shows the video splash image (thumbnail)\n\n Args:\n file_id (str): id of the target file.\n\n Returns:\n str: url for the splash image.", "id": "f5237:c0:m19"} {"signature": "def parse_commits(data):", "body": "raw_commits = RE_COMMIT.finditer(data)for rc in raw_commits:full_commit = rc.groups()[]parts = RE_COMMIT.match(full_commit).groupdict()parsed_commit = parse_commit(parts)yield parsed_commit", "docstring": "Accept a string and parse it into many commits.\n Parse and yield each commit-dictionary.\n This function is a generator.", "id": "f5245:m0"} {"signature": "def get_ext(name):", "body": "other, sep, ext = name.partition('')return ext", "docstring": "Return the extension only for a name (like a filename)\n\n>>> get_ext('foo.bar')\n'bar'\n>>> get_ext('.only')\n'only'\n>>> get_ext('')\n''\n>>> get_ext('noext')\n''\n>>> get_ext('emptyext.')\n''\n\nNote that for any non-empty string, the result will never be the\nsame as the input. This is a useful property for basepack.", "id": "f5249:m2"} {"signature": "def save(self, target=None):", "body": "target = target or getattr(self, '', None)if isinstance(target, six.string_types):self.filename = targettarget = open(target, '')if target is None:msg = (\"\"% self.__class__.__name__)raise ValueError(msg)self._store(target)", "docstring": "Save this package to target, which should be a filename or open\nfile stream. 
If target is not supplied, and this package has a\nfilename attribute (such as when this package was created from\nan existing file), it will be used.", "id": "f5251:c0:m3"} {"signature": "def __getitem__(self, name):", "body": "return self.parts[name]", "docstring": "Returns a part from the given URL.", "id": "f5255:c1:m2"} {"signature": "def related(self, reltype):", "body": "parts = []package = getattr(self, '', None) or selffor rel in self.relationships.types.get(reltype, []):parts.append(package[posixpath.join(self.base, rel.target)])return parts", "docstring": "Return a list of parts related to this one via reltype.", "id": "f5255:c0:m1"} {"signature": "def __new__(mcs, name, bases, attrs):", "body": "cls = type.__new__(mcs, name, bases, attrs)if not hasattr(cls, ''):cls.classes_by_rel_type = defaultdict(lambda: cls)rt = attrs.get('', None)if rt:cls.classes_by_rel_type[rt] = clsreturn cls", "docstring": "This is called when a new class is created of this type", "id": "f5255:c3:m0"} {"signature": "def find_for(self, name):", "body": "map = self.itemsreturn map.get(name, None) or map.get(get_ext(name) or None, None)", "docstring": "Get the correct content type for a given name", "id": "f5255:c7:m7"} {"signature": "def _load_content_types(self, source):", "body": "self.content_types.update(ContentTypes.load(source))", "docstring": "Load up the content_types object with value from source XML.", "id": "f5255:c1:m11"} {"signature": "def derived(self, name, relative_coords, formula):", "body": "relZ, relN = relative_coordsdaughter_idx = [(x[] + relZ, x[] + relN) for x in self.df.index]values = formula(self.df.values, self.df.loc[daughter_idx].values)return Table(df=pd.Series(values, index=self.df.index, name=name + '' + self.name + ''))", "docstring": "Helper function for derived quantities", "id": "f5263:c0:m38"} {"signature": "@property@memoizedef s1p(self):", "body": "M_P = f = lambda parent, daugther: -parent + daugther + M_Preturn self.derived('', (-, ), f)", "docstring": "Return 1 proton separation energy", "id": "f5263:c0:m37"} {"signature": "@property@memoizedef s1n(self):", "body": "M_N = f = lambda parent, daugther: -parent + daugther + M_Nreturn self.derived('', (, -), f)", "docstring": "Return 1 neutron separation energy", "id": "f5263:c0:m35"} {"signature": "@propertydef A(self):", "body": "return self.Z + self.N", "docstring": "Return the mass number A for all nuclei in the table as a numpy array.", "id": "f5263:c0:m9"} {"signature": "def select(self, condition, name=''):", "body": "if condition.__code__.co_argcount == :idx = [(Z, N) for (Z, N), M in self if condition(M)]if condition.__code__.co_argcount == :idx = [(Z, N) for (Z, N) in self.index if condition(Z, N)]if condition.__code__.co_argcount == :idx = [(Z, N) for (Z, N), M in self if condition(Z, N, M)]index = pd.MultiIndex.from_tuples(idx, names=['', ''])return Table(df=self.df.ix[index], name=name)", "docstring": "Selects nuclei according to a condition on Z,N or M\n\nParameters\n----------\ncondition : function,\n Can have one of the signatures f(M), f(Z,N) or f(Z, N, M)\n must return a boolean value\nname: string, optional name for the resulting Table\n\nExample:\n--------\nSelect all nuclei with A > 160:\n\n>>> A_gt_160 = lambda Z,N: Z + N > 160\n>>> Table('AME2003').select(A_gt_160)", "id": "f5263:c0:m18"} {"signature": "@classmethoddef from_name(cls, name):", "body": "filename = os.path.join(package_dir, '', name + '')return cls.from_file(filename, name)", "docstring": "Imports a mass table from a file", "id": 
"f5263:c0:m2"} {"signature": "@propertydef Z(self):", "body": "return self.df.index.get_level_values('').values", "docstring": "Return the proton number Z for all nuclei in the table as a numpy array.", "id": "f5263:c0:m7"} {"signature": "@property@memoizedef even_odd(self):", "body": "return self.select(lambda Z, N: not(Z % ) and (N % ), name=self.name)", "docstring": "Selects even-odd nuclei from the table", "id": "f5263:c0:m27"} {"signature": "@property@memoizedef odd_odd(self):", "body": "return self.select(lambda Z, N: (Z % ) and (N % ), name=self.name)", "docstring": "Selects odd-odd nuclei from the table:\n\n >>> Table('FRDM95').odd_odd\n Out[13]:\n Z N\n 9 9 1.21\n 11 0.10\n 13 3.08\n 15 9.32\n ...", "id": "f5263:c0:m25"} {"signature": "def __init__(self, name='', df=None):", "body": "if df is not None: self.df = dfself.name = nameelif name in self._names: self.name = nameself.df = self.from_name(name).dfelse:print ('')print((''.join(Table.names)))return None", "docstring": "Init from a Series/Dataframe (df) of a file (name)", "id": "f5263:c0:m0"} {"signature": "@propertydef count(self):", "body": "return len(self.df)", "docstring": "Return the total number of nuclei in the table:\n\n >>> Table('AME2012').count\n 2438\n\n It is also possible to do:\n\n >>> len(Table('AME2012'))\n 2438", "id": "f5263:c0:m22"} {"signature": "def error(self, relative_to=''):", "body": "df = self.df - Table(relative_to).dfreturn Table(df=df)", "docstring": "Calculate error difference\n\nParameters\n----------\nrelative_to : string,\n a valid mass table name.\n\nExample:\n----------\n>>> Table('DUZU').error(relative_to='AME2003')", "id": "f5263:c0:m29"} {"signature": "@property@memoizedef s2p(self):", "body": "M_P = f = lambda parent, daugther: -parent + daugther + * M_Preturn self.derived('', (-, ), f)", "docstring": "Return 2 proton separation energy", "id": "f5263:c0:m36"} {"signature": "@classmethoddef from_file(cls, filename, name=''):", "body": "df = pd.read_csv(filename, header=, delim_whitespace=True, index_col=[, ])['']df.name = namereturn cls(df=df, name=name)", "docstring": "Imports a mass table from a file", "id": "f5263:c0:m3"} {"signature": "@property@memoizedef q_alpha(self):", "body": "M_ALPHA = f = lambda parent, daugther: parent - daugther - M_ALPHAreturn self.derived('', (-, -), f)", "docstring": "Return Q_alpha", "id": "f5263:c0:m32"} {"signature": "def chart_plot(self, ax=None, cmap='',xlabel='', ylabel='', grid_on=True, colorbar=True):", "body": "from matplotlib.mlab import griddatafrom numpy import linspace, meshgridimport matplotlib.pyplot as pltx = self.dropna().Ny = self.dropna().Zz = self.dropna().valuesxi = linspace(min(x), max(x), max(x) - min(x) + )yi = linspace(min(y), max(y), max(y) - min(y) + )Z = griddata(x, y, z, xi, yi)X, Y = meshgrid(xi, yi)if ax is None:ax = plt.gca()chart = ax.pcolormesh(X, Y, Z, cmap=cmap)ax.set_xlabel(xlabel)ax.set_ylabel(ylabel)ax.grid(grid_on)ax.set_aspect('')if colorbar:plt.colorbar(chart)return ax", "docstring": "Plot a nuclear chart with (N,Z) as axis and the values\n of the Table as a color scale\n\n Parameters\n ----------\n ax: optional matplotlib axes\n defaults to current axes\n cmap: a matplotlib colormap\n default: 'RdBu'\n xlabel: string representing the label of the x axis\n default: 'N'\n ylabel: string, default: 'Z'\n the label of the x axis\n grid_on: boolean, default: True,\n whether to draw the axes grid or not\n colorbar: boolean, default: True\n whether to draw a colorbar or not\n\n Returns\n -------\n ax: a matplotlib axes 
object\n\n Example\n -------\n Plot the theoretical deviation for M\u00f6ller's model::\n\n >>> Table('FRDM95').error().chart_plot()", "id": "f5263:c0:m44"} {"signature": "def _add_ones_dim(arr):", "body": "arr = arr[..., np.newaxis]return np.concatenate((arr, np.ones_like(arr)), axis=-)", "docstring": "Adds a dimension with ones to the array.", "id": "f5268:m2"} {"signature": "@command(aliases='')def fm(client, event, channel, nick, rest):", "body": "if rest:rest = rest.strip()Karma.store.change(rest, )rcpt = restelse:rcpt = channelreturn f''", "docstring": "pmxbot speaks French", "id": "f5280:m2"} {"signature": "def op_and(self, *elements):", "body": "expression = self.add_operator(Operator(''))for element in elements:expression.add_element(element)return expression", "docstring": "Update the ``Expression`` by joining the specified additional\n ``elements`` using an \"AND\" ``Operator``\n\n Args:\n *elements (BaseExpression): The ``Expression`` and/or\n ``Constraint`` elements which the \"AND\" ``Operator`` applies\n to.\n\n Returns:\n Expression: ``self`` or related ``Expression``.", "id": "f5318:c1:m5"} {"signature": "def __init__(self):", "body": "self.parent = None", "docstring": "Initialize instance of ``BaseExpression``.", "id": "f5318:c0:m0"} {"signature": "def create_nested_expression(self):", "body": "sub = Expression()self.add_element(sub)return sub", "docstring": "Create a nested ``Expression``, add it as an element to this\n ``Expression``, and return it.\n\n Returns:\n Expression: The newly created nested ``Expression``.", "id": "f5318:c1:m4"} {"signature": "def __init__(self):", "body": "super(Expression, self).__init__()self.elements = []self.operator = Noneself._working_fragment = selfself._last_element = None", "docstring": "Initialize instance of ``Expression``.", "id": "f5318:c1:m0"} {"signature": "def to_python(self):", "body": "if len(self.elements) == :return Noneif len(self.elements) == :return self.elements[].to_python()operator = self.operator or Operator('')return [operator.to_python()] +[elem.to_python() for elem in self.elements]", "docstring": "Deconstruct the ``Expression`` instance to a list or tuple\n (if the ``Expression`` contains only one ``Constraint``).\n\n Returns:\n list or tuple: The deconstructed ``Expression``.", "id": "f5318:c1:m7"} {"signature": "def add_operator(self, operator):", "body": "if not isinstance(operator, Operator):raise FiqlObjectException(\"\" % (operator.__class__))if not self._working_fragment.operator:self._working_fragment.operator = operatorelif operator > self._working_fragment.operator:last_constraint = self._working_fragment.elements.pop()self._working_fragment = self._working_fragment.create_nested_expression()self._working_fragment.add_element(last_constraint)self._working_fragment.add_operator(operator)elif operator < self._working_fragment.operator:if self._working_fragment.parent:return self._working_fragment.parent.add_operator(operator)else:return Expression().add_element(self._working_fragment).add_operator(operator)return self", "docstring": "Add an ``Operator`` to the ``Expression``.\n\n The ``Operator`` may result in a new ``Expression`` if an ``Operator``\n already exists and is of a different precedence.\n\n There are three possibilities when adding an ``Operator`` to an\n ``Expression`` depending on whether or not an ``Operator`` already\n exists:\n\n - No ``Operator`` on the working ``Expression``; Simply set the\n ``Operator`` and return ``self``.\n - ``Operator`` already exists and is higher in 
precedence; The\n ``Operator`` and last ``Constraint`` belong in a sub-expression of\n the working ``Expression``.\n - ``Operator`` already exists and is lower in precedence; The\n ``Operator`` belongs to the parent of the working ``Expression``\n whether one currently exists or not. To remain in the context of\n the top ``Expression``, this method will return the parent here\n rather than ``self``.\n\n Args:\n operator (Operator): What we are adding.\n\n Returns:\n Expression: ``self`` or related ``Expression``.\n\n Raises:\n FiqlObjectExpression: Operator is not a valid ``Operator``.", "id": "f5318:c1:m2"} {"signature": "def __eq__(self, other):", "body": "return OPERATOR_MAP[self.value][] == OPERATOR_MAP[other.value][]", "docstring": "Of equal precendence.\n\n Args:\n other (Operator): The ``Operator`` we are comparing precedence\n against.\n\n Returns:\n boolean: ``True`` if of equal precendence of ``other``.", "id": "f5320:c0:m4"} {"signature": "def render_login_result(framework_name, result):", "body": "reload_module(sys)if six.PY2:sys.setdefaultencoding('')response = Noneoriginal_credentials = {}refreshed_credentials = {}if result:if result.user:result.user.update()if result.user.credentials:original_credentials.update(result.user.credentials.__dict__)time.sleep()response = result.user.credentials.refresh(force=True)refreshed_credentials.update(result.user.credentials.__dict__)user_properties = ['', '', '', '','', '', '', '', '','', '', '', '', '','', '', '', '']access_token_content = Noneif hasattr(result.provider, ''):access_token_content = result.provider.access_token_response.content template = env.get_template('')return template.render(result=result,providers=ASSEMBLED_CONFIG.values(),oauth2_providers=OAUTH2_PROVIDERS,oauth1_providers=OAUTH1_PROVIDERS,openid_providers=OPENID_PROVIDERS,user_properties=user_properties,error=result.error,credentials_response=response,original_credentials=original_credentials,refreshed_credentials=refreshed_credentials,framework_name=framework_name,access_token_content=access_token_content,birth_date_format=BIRTH_DATE_FORMAT)", "docstring": "Renders the login handler.\n\n:param result:\n\n The :class:`.authomatic.core.LoginResult` returned by the\n :meth:`.authomatic.Authomatic.login` method.", "id": "f5358:m1"} {"signature": "def with_metaclass(meta, *bases):", "body": "class metaclass(meta):def __new__(cls, name, this_bases, d):return meta(name, bases, d)return type.__new__(metaclass, '', (), {})", "docstring": "Create a base class with a metaclass.", "id": "f5359:m7"} {"signature": "def _import_module(name):", "body": "__import__(name)return sys.modules[name]", "docstring": "Import module, returning the module after the last dot.", "id": "f5359:m1"} {"signature": "@abc.abstractmethoddef write(self, value):", "body": "", "docstring": "Must write specified value to response.\n\n:param str value:\n String to be written to response.", "id": "f5360:c0:m3"} {"signature": "@abc.abstractmethoddef values(self):", "body": "", "docstring": "Same as :meth:`dict.values`.", "id": "f5362:c1:m1"} {"signature": "@abc.abstractmethoddef save(self):", "body": "", "docstring": "Called only once per request.\n\nShould implement a mechanism for setting the the session\n**cookie** and saving the session **data** to storage.", "id": "f5362:c0:m0"} {"signature": "@classmethoddef initialize(cls):", "body": "if not len(cls.query().fetch()):example = cls.get_or_insert('')example.class_ = '' +''example.provider_name = ''example.consumer_key = ''example.consumer_secret = 
''example.provider_id = example.scope = ''example.identifier_param = '' +''example.put()raise GAEError('''')", "docstring": "Creates an **\"Example\"** entity of kind **\"NDBConfig\"** in the\ndatastore if the model is empty and raises and error to inform you that\nyou should populate the model with data.\n\n.. note::\n\n The *Datastore Viewer* in the ``_ah/admin/`` won't let you add\n properties to a model if there is not an entity with that\n property already. Therefore it is a good idea to keep the\n **\"Example\"** entity (which has all possible properties set) in\n the datastore.", "id": "f5364:c2:m2"} {"signature": "@classmethoddef values(cls):", "body": "results = cls.query().fetch()return [result.to_dict() for result in results]", "docstring": "Resembles the :meth:`dict.values` method.", "id": "f5364:c2:m1"} {"signature": "def backend(self, adapter):", "body": "AUTHOMATIC_HEADER = ''request_type = adapter.params.get('', '')json_input = adapter.params.get('')credentials = adapter.params.get('')url = adapter.params.get('')method = adapter.params.get('', '')body = adapter.params.get('', '')params = adapter.params.get('')params = json.loads(params) if params else {}headers = adapter.params.get('')headers = json.loads(headers) if headers else {}ProviderClass = Credentials.deserialize(self.config, credentials).provider_classif request_type == '':jsonp = params.get('')if ProviderClass.supports_jsonp and method is '':request_type = ''else:if jsonp:params.pop('')request_type = ''if request_type == '':response = self.access(credentials, url, params, method, headers, body)result = response.contentadapter.status = str(response.status) + '' + str(response.reason)for k, v in response.getheaders():logging.info(''.format(k, v))adapter.set_header(k, v)elif request_type == '':if json_input:result = self.request_elements(json_input=json_input, return_json=True)else:result = self.request_elements(credentials=credentials,url=url,method=method,params=params,headers=headers,body=body,return_json=True)adapter.set_header('', '')else:result = ''adapter.set_header(AUTHOMATIC_HEADER, request_type)adapter.write(result)", "docstring": "Converts a *request handler* to a JSON backend which you can use with\n:ref:`authomatic.js `.\n\nJust call it inside a *request handler* like this:\n\n::\n\n class JSONHandler(webapp2.RequestHandler):\n def get(self):\n authomatic.backend(Webapp2Adapter(self))\n\n:param adapter:\n The only argument is an :doc:`adapter `.\n\nThe *request handler* will now accept these request parameters:\n\n:param str type:\n Type of the request. Either ``auto``, ``fetch`` or ``elements``.\n Default is ``auto``.\n\n:param str credentials:\n Serialized :class:`.Credentials`.\n\n:param str url:\n URL of the **protected resource** request.\n\n:param str method:\n HTTP method of the **protected resource** request.\n\n:param str body:\n HTTP body of the **protected resource** request.\n\n:param JSON params:\n HTTP params of the **protected resource** request as a JSON object.\n\n:param JSON headers:\n HTTP headers of the **protected resource** request as a\n JSON object.\n\n:param JSON json:\n You can pass all of the aforementioned params except ``type``\n in a JSON object.\n\n .. 
code-block:: javascript\n\n {\n \"credentials\": \"######\",\n \"url\": \"https://example.com\",\n \"method\": \"POST\",\n \"params\": {\"foo\": \"bar\"},\n \"headers\": {\"baz\": \"bing\"},\n \"body\": \"the body of the request\"\n }\n\nDepending on the ``type`` param, the handler will either write\na JSON object with *request elements* to the response,\nand add an ``Authomatic-Response-To: elements`` response header, ...\n\n.. code-block:: javascript\n\n {\n \"url\": \"https://example.com/api\",\n \"method\": \"POST\",\n \"params\": {\n \"access_token\": \"###\",\n \"foo\": \"bar\"\n },\n \"headers\": {\n \"baz\": \"bing\",\n \"Authorization\": \"Bearer ###\"\n }\n }\n\n... or make a fetch to the **protected resource** and forward\nit's response content, status and headers with an additional\n``Authomatic-Response-To: fetch`` header to the response.\n\n.. warning::\n\n The backend will not work if you write anything to the\n response in the handler!", "id": "f5366:c11:m6"} {"signature": "def popup_js(self, callback_name=None, indent=None,custom=None, stay_open=False):", "body": "custom_callback = \"\"\"\"\"\".format(cb=callback_name) if callback_name else ''return \"\"\"\"\"\".format(result=self.to_json(indent),custom=json.dumps(custom),custom_callback=custom_callback,stay_open='' if stay_open else '')", "docstring": "Returns JavaScript that:\n\n#. Triggers the ``options.onLoginComplete(result, closer)``\n handler set with the :ref:`authomatic.setup() `\n function of :ref:`javascript.js `.\n#. Calls the JavasScript callback specified by :data:`callback_name`\n on the opener of the *login handler popup* and passes it the\n *login result* JSON object as first argument and the `closer`\n function which you should call in your callback to close the popup.\n\n:param str callback_name:\n The name of the javascript callback e.g ``foo.bar.loginCallback``\n will result in ``window.opener.foo.bar.loginCallback(result);``\n in the HTML.\n\n:param int indent:\n The number of spaces to indent the JSON result object.\n If ``0`` or negative, only newlines are added.\n If ``None``, no newlines are added.\n\n:param custom:\n Any JSON serializable object that will be passed to the\n ``result.custom`` attribute.\n\n:param str stay_open:\n If ``True``, the popup will stay open.\n\n:returns:\n :class:`str` with JavaScript.", "id": "f5366:c7:m1"} {"signature": "def __init__(self, adapter, secret, name='', max_age=,secure=False):", "body": "self.adapter = adapterself.name = nameself.secret = secretself.max_age = max_ageself.secure = secureself._data = {}", "docstring": ":param str secret:\n Session secret used to sign the session cookie.\n:param str name:\n Session cookie name.\n:param int max_age:\n Maximum allowed age of session cookie nonce in seconds.\n:param bool secure:\n If ``True`` the session cookie will be saved with ``Secure``\n attribute.", "id": "f5366:c3:m0"} {"signature": "def __init__(self, config, secret, session_max_age=, secure_cookie=False,session=None, session_save_method=None, report_errors=True,debug=False, logging_level=logging.INFO, prefix='',logger=None):", "body": "self.config = configself.secret = secretself.session_max_age = session_max_ageself.secure_cookie = secure_cookieself.session = sessionself.session_save_method = session_save_methodself.report_errors = report_errorsself.debug = debugself.logging_level = logging_levelself.prefix = prefixself._logger = logger or logging.getLogger(str(id(self)))if logger is None:self._logger.setLevel(logging_level)", "docstring": "Encapsulates 
all the functionality of this package.\n\n:param dict config:\n :doc:`config`\n\n:param str secret:\n A secret string that will be used as the key for signing\n :class:`.Session` cookie and as a salt by *CSRF* token generation.\n\n:param session_max_age:\n Maximum allowed age of :class:`.Session` cookie nonce in seconds.\n\n:param bool secure_cookie:\n If ``True`` the :class:`.Session` cookie will be saved wit\n ``Secure`` attribute.\n\n:param session:\n Custom dictionary-like session implementation.\n\n:param callable session_save_method:\n A method of the supplied session or any mechanism that saves the\n session data and cookie.\n\n:param bool report_errors:\n If ``True`` exceptions encountered during the **login procedure**\n will be caught and reported in the :attr:`.LoginResult.error`\n attribute.\n Default is ``True``.\n\n:param bool debug:\n If ``True`` traceback of exceptions will be written to response.\n Default is ``False``.\n\n:param int logging_level:\n The logging level threshold for the default logger as specified in\n the standard Python\n `logging library `_.\n This setting is ignored when :data:`logger` is set.\n Default is ``logging.INFO``.\n\n:param str prefix:\n Prefix used as the :class:`.Session` cookie name.\n\n:param logger:\n A :class:`logging.logger` instance.", "id": "f5366:c11:m0"} {"signature": "@propertydef params(self):", "body": "return self[]", "docstring": "Dictionary of request parameters.", "id": "f5366:c10:m3"} {"signature": "def getheaders(self):", "body": "return self.httplib_response.getheaders()", "docstring": "Same as :meth:`httplib.HTTPResponse.getheaders`.", "id": "f5366:c8:m4"} {"signature": "@propertydef full_url(self):", "body": "return self.url + '' + self.query_string", "docstring": "URL with query string.", "id": "f5366:c10:m7"} {"signature": "def normalize_dict(dict_):", "body": "return dict([(k, v[] if not isinstance(v, str) and len(v) == else v)for k, v in list(dict_.items())])", "docstring": "Replaces all values that are single-item iterables with the value of its\nindex 0.\n\n:param dict dict_:\n Dictionary to normalize.\n\n:returns:\n Normalized dictionary.", "id": "f5366:m0"} {"signature": "def create_cookie(self, delete=None):", "body": "value = '' if delete else self._serialize(self.data)split_url = parse.urlsplit(self.adapter.url)domain = split_url.netloc.split('')[]if '' not in domain:template = ''else:template = ('''')return template.format(name=self.name,value=value,domain=domain,path=split_url.path,secure='' if self.secure else '',expires='' if delete else '')", "docstring": "Creates the value for ``Set-Cookie`` HTTP header.\n\n:param bool delete:\n If ``True`` the cookie value will be ``deleted`` and the\n Expires value will be ``Thu, 01-Jan-1970 00:00:01 GMT``.", "id": "f5366:c3:m1"} {"signature": "@propertydef valid(self):", "body": "if self.expiration_time:return self.expiration_time > int(time.time())else:return True", "docstring": "``True`` if credentials are valid, ``False`` if expired.", "id": "f5366:c6:m6"} {"signature": "def async_update(self):", "body": "return Future(self.update)", "docstring": "Same as :meth:`.update` but runs asynchronously in a separate thread.\n\n.. 
warning::\n\n |async|\n\n:returns:\n :class:`.Future` instance representing the separate thread.", "id": "f5366:c4:m2"} {"signature": "def login(self, adapter, provider_name, callback=None,session=None, session_saver=None, **kwargs):", "body": "if provider_name:provider_settings = self.config.get(provider_name)if not provider_settings:raise ConfigError(''.format(provider_name))if not (session is None or session_saver is None):session = sessionsession_saver = session_saverelse:session = Session(adapter=adapter,secret=self.secret,max_age=self.session_max_age,name=self.prefix,secure=self.secure_cookie)session_saver = session.saveclass_ = provider_settings.get('')if not class_:raise ConfigError(''''.format(provider_name))ProviderClass = resolve_provider_class(class_)ProviderClass._logger = self._loggerprovider = ProviderClass(self,adapter=adapter,provider_name=provider_name,callback=callback,session=session,session_saver=session_saver,**kwargs)return provider.login()else:self.backend(adapter)", "docstring": "If :data:`provider_name` specified, launches the login procedure for\ncorresponding :doc:`provider ` and returns\n:class:`.LoginResult`.\n\nIf :data:`provider_name` is empty, acts like\n:meth:`.Authomatic.backend`.\n\n.. warning::\n\n The method redirects the **user** to the **provider** which in\n turn redirects **him/her** back to the *request handler* where\n it has been called.\n\n:param str provider_name:\n Name of the provider as specified in the keys of the :doc:`config`.\n\n:param callable callback:\n If specified the method will call the callback with\n :class:`.LoginResult` passed as argument and will return nothing.\n\n:param bool report_errors:\n\n.. note::\n\n Accepts additional keyword arguments that will be passed to\n :doc:`provider ` constructor.\n\n:returns:\n :class:`.LoginResult`", "id": "f5366:c11:m1"} {"signature": "@expire_in.setterdef expire_in(self, value):", "body": "if value:self._expiration_time = int(time.time()) + int(value)self._expire_in = value", "docstring": "Computes :attr:`.expiration_time` when the value is set.", "id": "f5366:c6:m2"} {"signature": "def _deserialize(self, value):", "body": "encoded, timestamp, signature = value.split('')if not signature == self._signature(self.name, encoded, timestamp):raise SessionError(''.format(signature))if int(timestamp) < int(time.time()) - self.max_age:return Nonedecoded = parse.unquote(encoded)deserialized = pickle.loads(decoded.encode(''))return deserialized", "docstring": "Deserializes and verifies the value created by :meth:`._serialize`.\n\n:param str value:\n The serialized value.\n\n:returns:\n Deserialized object.", "id": "f5366:c3:m8"} {"signature": "def refresh(self, force=False, soon=):", "body": "if hasattr(self.provider_class, ''):if force or self.expire_soon(soon):logging.info(''.format(self.provider_name))return self.provider_class(self, None, self.provider_name).refresh_credentials(self)", "docstring": "Refreshes the credentials only if the **provider** supports it and if\nit will expire in less than one day. It does nothing in other cases.\n\n.. note::\n\n The credentials will be refreshed only if it gives sense\n i.e. only |oauth2|_ has the notion of credentials\n *refreshment/extension*.\n And there are also differences across providers e.g. 
Google\n supports refreshment only if there is a ``refresh_token`` in\n the credentials and that in turn is present only if the\n ``access_type`` parameter was set to ``offline`` in the\n **user authorization request**.\n\n:param bool force:\n If ``True`` the credentials will be refreshed even if they\n won't expire soon.\n\n:param int soon:\n Number of seconds specifying what means *soon*.", "id": "f5366:c6:m8"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(OAuth2, self).__init__(*args, **kwargs)self.scope = self._kwarg(kwargs, '', [])self.offline = self._kwarg(kwargs, '', False)", "docstring": "Accepts additional keyword arguments:\n\n:param list scope:\n List of strings specifying requested permissions as described\n in the\n `OAuth 2.0 spec `_.\n\n:param bool offline:\n If ``True`` the **provider** will be set up to request an\n *offline access token*.\n Default is ``False``.\n\nAs well as those inherited from :class:`.AuthorizationProvider`\nconstructor.", "id": "f5369:c0:m0"} {"signature": "def _x_scope_parser(self, scope):", "body": "return ''.join(scope)", "docstring": "Google has space-separated scopes.", "id": "f5369:c10:m3"} {"signature": "def __init__(self, *args, **kwargs):", "body": "super(AuthorizationProvider, self).__init__(*args, **kwargs)self.consumer_key = self._kwarg(kwargs, '')self.consumer_secret = self._kwarg(kwargs, '')self.user_authorization_params = self._kwarg(kwargs, '', {})self.access_token_headers = self._kwarg(kwargs, '', {})self.access_token_params = self._kwarg(kwargs, '', {})self.id = self._kwarg(kwargs, '')self.access_headers = self._kwarg(kwargs, '', {})self.access_params = self._kwarg(kwargs, '', {})self.credentials = authomatic.core.Credentials(self.settings.config, provider=self)self.access_token_response = None", "docstring": "Accepts additional keyword arguments:\n\n:arg str consumer_key:\n The *key* assigned to our application (**consumer**) by the\n **provider**.\n\n:arg str consumer_secret:\n The *secret* assigned to our application (**consumer**) by the\n **provider**.\n\n:arg int id:\n A unique numeric ID used to serialize :class:`.Credentials`.\n\n:arg dict user_authorization_params:\n A dictionary of additional request parameters for\n **user authorization request**.\n\n:arg dict access_token_params:\n A dictionary of additional request parameters for\n **access_with_credentials token request**.\n\n:arg dict access_headers:\n A dictionary of default HTTP headers that will be used when\n accessing **user's** protected resources.\n Applied by :meth:`.access()`, :meth:`.update_user()` and\n :meth:`.User.update()`\n\n:arg dict access_params:\n A dictionary of default query string parameters that will be used\n when accessing **user's** protected resources.\n Applied by :meth:`.access()`, :meth:`.update_user()` and\n :meth:`.User.update()`", "id": "f5370:c1:m0"} {"signature": "@propertydef type_id(self):", "body": "cls = self.__class__mod = sys.modules.get(cls.__module__)return str(self.PROVIDER_TYPE_ID) + '' +str(mod.PROVIDER_ID_MAP.index(cls))", "docstring": "A short string representing the provider implementation id used for\nserialization of :class:`.Credentials` and to identify the type of\nprovider in JavaScript.\n\nThe part before hyphen denotes the type of the provider, the part\nafter hyphen denotes the class id e.g.\n``oauth2.Facebook.type_id = '2-5'``,\n``oauth1.Twitter.type_id = '1-5'``.", "id": "f5370:c1:m7"} {"signature": "@classmethoddef _authorization_header(cls, credentials):", "body": "if 
cls._x_use_authorization_header:res = ''.join((credentials.consumer_key,credentials.consumer_secret))res = base64.b64encode(six.b(res)).decode()return {'': ''.format(res)}else:return {}", "docstring": "Creates authorization headers if the provider supports it. See:\nhttp://en.wikipedia.org/wiki/Basic_access_authentication.\n\n:param credentials:\n :class:`.Credentials`\n\n:returns:\n Headers as :class:`dict`.", "id": "f5370:c1:m11"} {"signature": "@staticmethoddef _x_credentials_parser(credentials, data):", "body": "return credentials", "docstring": "Override this to handle differences in naming conventions across\nproviders.\n\n:param credentials:\n :class:`.Credentials`\n\n:param dict data:\n Response data dictionary.\n\n:returns:\n :class:`.Credentials`", "id": "f5370:c1:m15"} {"signature": "@abc.abstractmethoddef login(self):", "body": "", "docstring": "Launches the *login procedure* to get **user's credentials** from\n**provider**.\n\nShould be decorated with :func:`.login_decorator`. The *login\nprocedure* is considered finished when the :attr:`.user`\nattribute is not empty when the method runs out of it's flow or\nwhen there are errors.", "id": "f5370:c0:m7"} {"signature": "@abc.abstractmethoddef create_request_elements(self, request_type, credentials,url, method='', params=None, headers=None,body=''):", "body": "", "docstring": "Must return :class:`.RequestElements`.\n\n.. warning::\n\n |classmethod|\n\n:param int request_type:\n Type of the request specified by one of the class's constants.\n\n:param credentials:\n :class:`.Credentials` of the **user** whose\n **protected resource** we want to access.\n\n:param str url:\n URL of the request.\n\n:param str method:\n HTTP method of the request.\n\n:param dict params:\n Dictionary of request parameters.\n\n:param dict headers:\n Dictionary of request headers.\n\n:param str body:\n Body of ``POST``, ``PUT`` and ``PATCH`` requests.\n\n:returns:\n :class:`.RequestElements`", "id": "f5370:c1:m6"} {"signature": "@staticmethoddef csrf_generator(secret):", "body": "hashed = hashlib.md5(uuid.uuid4().bytes + six.b(secret)).hexdigest()span = shift = random.randint(, span)return hashed[shift:shift - span - ]", "docstring": "Generates CSRF token.\n\nInspired by this article:\nhttp://blog.ptsecurity.com/2012/10/random-number-security-in-python.html\n\n:returns:\n :class:`str` Random unguessable string.", "id": "f5370:c0:m16"} {"signature": "@staticmethoddef _split_url(url):", "body": "split = parse.urlsplit(url)base = parse.urlunsplit((split.scheme, split.netloc, split.path, , ))params = parse.parse_qsl(split.query, True)return base, params", "docstring": "Splits given url to url base and params converted to list of tuples.", "id": "f5370:c1:m13"} {"signature": "def _session_key(self, key):", "body": "return ''.format(self.settings.prefix, self.name, key)", "docstring": "Generates session key string.\n\n:param str key:\n e.g. 
``\"authomatic:facebook:key\"``", "id": "f5370:c0:m13"} {"signature": "def access(self, url, params=None, method='', headers=None,body='', max_redirects=, content_parser=None):", "body": "if not self.user and not self.credentials:raise CredentialsError(u'')headers = headers or {}self._log(logging.INFO,u''.format(url))request_elements = self.create_request_elements(request_type=self.PROTECTED_RESOURCE_REQUEST_TYPE,credentials=self.credentials,url=url,body=body,params=params,headers=headers,method=method)response = self._fetch(*request_elements,max_redirects=max_redirects,content_parser=content_parser)self._log(logging.INFO,u''.format(response.status))return response", "docstring": "Fetches the **protected resource** of an authenticated **user**.\n\n:param credentials:\n The **user's** :class:`.Credentials` (serialized or normal).\n\n:param str url:\n The URL of the **protected resource**.\n\n:param str method:\n HTTP method of the request.\n\n:param dict headers:\n HTTP headers of the request.\n\n:param str body:\n Body of ``POST``, ``PUT`` and ``PATCH`` requests.\n\n:param int max_redirects:\n Maximum number of HTTP redirects to follow.\n\n:param function content_parser:\n A function to be used to parse the :attr:`.Response.data`\n from :attr:`.Response.content`.\n\n:returns:\n :class:`.Response`", "id": "f5370:c1:m8"} {"signature": "@abc.abstractpropertydef request_token_url(self):", "body": "", "docstring": ":class:`str` URL where we can get the |oauth1| request token.\nsee http://oauth.net/core/1.0a/#auth_step1.", "id": "f5371:c3:m1"} {"signature": "def _access_user_info(self):", "body": "response = super(Bitbucket, self)._access_user_info()response.data.setdefault(\"\", None)email_response = self.access(self.user_email_url)if email_response.data:for item in email_response.data:if item.get(\"\", False):response.data.update(email=item.get(\"\", None))return response", "docstring": "Email is available in separate method so second request is needed.", "id": "f5371:c4:m1"} {"signature": "def method_names(self):", "body": "for c in self.classes():ms = inspect.getmembers(c, lambda f: inspect.ismethod(f) or inspect.isfunction(f))method_name = getattr(self, '', '')method_regex = ''if method_name:if method_name.startswith(self.method_prefix):method_regex = re.compile(r''.format(method_name), flags=re.I)else:if method_name.startswith(\"\"):method_name = method_name.strip(\"\")method_regex = re.compile(r''.format(self.method_prefix, method_name),flags=re.I)else:method_regex = re.compile(r''.format(self.method_prefix, method_name),flags=re.I)for m_name, m in ms:if not m_name.startswith(self.method_prefix): continuecan_yield = Trueif method_regex and not method_regex.match(m_name):can_yield = Falseif can_yield:logger.debug(''.format(m_name, method_name))yield c, m_name", "docstring": "return the actual test methods that matched self.method_name", "id": "f5409:c2:m8"} {"signature": "def raise_any_error(self):", "body": "for tc in self.possible:tc.raise_found_error()", "docstring": "raise any found error in the possible PathFinders", "id": "f5409:c1:m1"} {"signature": "def set_possible(self):", "body": "possible = []name = self.namelogger.debug(''.format(name))name_f = self.name.lower()filepath = \"\"if name_f.endswith(\"\") or \"\" in name_f:bits = name.split(\"\", )filepath = bits[]logger.debug(''.format(filepath))name = bits[] if len(bits) > else \"\"if name:logger.debug(''.format(name, filepath))bits = name.split('')basedir = self.basedirmethod_prefix = self.method_prefixif re.search(r'', 
bits[-]):logger.debug(''.format(bits[-]))possible.append(PathFinder(basedir, method_prefix, **{'': bits[-],'': bits[-] if len(bits) > else '','': os.sep.join(bits[:-]),'': filepath,}))elif len(bits) > and re.search(r'', bits[-]):logger.debug(''.format(bits[-]))possible.append(PathFinder(basedir, method_prefix, **{'': bits[-],'': bits[-],'': bits[-] if len(bits) > else '','': os.sep.join(bits[:-]),'': filepath,}))else:if self.name:if filepath:if len(bits):possible.append(PathFinder(basedir, method_prefix, **{'': filepath,'': bits[],}))else:possible.append(PathFinder(basedir, method_prefix, **{'': filepath,}))else:logger.debug('')possible.append(PathFinder(basedir, method_prefix, **{'': bits[-],'': os.sep.join(bits[:-]),'': filepath,}))possible.append(PathFinder(basedir, method_prefix, **{'': bits[-],'': bits[-] if len(bits) > else '','': os.sep.join(bits[:-]),'': filepath,}))possible.append(PathFinder(basedir, method_prefix, **{'': os.sep.join(bits),'': filepath,}))else:possible.append(PathFinder(basedir, method_prefix, filepath=filepath))logger.debug(\"\".format(len(possible)))self.possible = possible", "docstring": "break up a module path to its various parts (prefix, module, class, method)\n\nthis uses PEP 8 conventions, so foo.Bar would be foo module with class Bar\n\nreturn -- list -- a list of possible interpretations of the module path\n (eg, foo.bar can be bar module in foo module, or bar method in foo module)", "id": "f5409:c1:m2"} {"signature": "def _find_basename(self, name, basenames, is_prefix=False):", "body": "ret = \"\"fileroots = [(os.path.splitext(n)[], n) for n in basenames]glob = Falseif name.startswith(\"\"):glob = Truename = name.strip(\"\")for fileroot, basename in fileroots:if name in fileroot or fileroot in name:for pf in self.module_postfixes:logger.debug(''.format(basename,name,pf))if glob:if name in fileroot and fileroot.endswith(pf):ret = basenamebreakelse:if fileroot.startswith(name) and fileroot.endswith(pf):ret = basenamebreakif not ret:for pf in self.module_prefixes:n = pf + namelogger.debug(''.format(basename, n))if glob:if fileroot.startswith(pf) and name in fileroot:ret = basenamebreakelse:if fileroot.startswith(n):ret = basenamebreakif not ret:if is_prefix:logger.debug(''.format(basename, name))if basename.startswith(name) or (glob and name in basename):ret = basenameelse:logger.debug(''.format(basename,name))if glob:if name in basename and self._is_module_path(basename):ret = basenameelse:if basename.startswith(name) and self._is_module_path(basename):ret = basenameif ret:logger.debug(''.format(ret))breakreturn ret", "docstring": "check if name combined with test prefixes or postfixes is found anywhere\n in the list of basenames\n\n :param name: string, the name you're searching for\n :param basenames: list, a list of basenames to check\n :param is_prefix: bool, True if this is a prefix search, which means it will\n also check if name matches any of the basenames without the prefixes or\n postfixes, if it is False then the prefixes or postfixes must be present\n (ie, the module we're looking for is the actual test module, not the parent\n modules it's contained in)\n :returns: string, the basename if it is found", "id": "f5409:c2:m9"} {"signature": "def paths(self):", "body": "module_name = getattr(self, '', '')module_prefix = getattr(self, '', '')filepath = getattr(self, '', '')if filepath:if os.path.isabs(filepath):yield filepathelse:yield os.path.join(self.basedir, filepath)else:if module_prefix:basedirs = self._find_prefix_paths(self.basedir, 
module_prefix)else:basedirs = [self.basedir]for basedir in basedirs:try:if module_name:path = self._find_module_path(basedir, module_name)else:path = basedirif os.path.isfile(path):logger.debug(''.format(path))yield pathelse:seen_paths = set()for root, dirs, files in self.walk(path):for basename in files:if basename.startswith(\"\"):if self._is_module_path(root):filepath = os.path.join(root, basename)if filepath not in seen_paths:logger.debug(''.format(filepath))seen_paths.add(filepath)yield filepathelse:fileroot = os.path.splitext(basename)[]for pf in self.module_postfixes:if fileroot.endswith(pf):filepath = os.path.join(root, basename)if filepath not in seen_paths:logger.debug(''.format(filepath))seen_paths.add(filepath)yield filepathfor pf in self.module_prefixes:if fileroot.startswith(pf):filepath = os.path.join(root, basename)if filepath not in seen_paths:logger.debug(''.format(filepath))seen_paths.add(filepath)yield filepathexcept IOError as e:logger.warning(e, exc_info=True)pass", "docstring": "given a basedir, yield all test modules paths recursively found in\nbasedir that are test modules\n\nreturn -- generator", "id": "f5409:c2:m15"} {"signature": "def loghandler_members():", "body": "Members = namedtuple(\"\", [\"\", \"\", \"\", \"\"])log_manager = logging.Logger.managerloggers = []ignore = set([modname()])if log_manager.root:loggers = list(log_manager.loggerDict.items())loggers.append((\"\", log_manager.root))for logger_name, logger in loggers:if logger_name in ignore: continuefor handler in getattr(logger, \"\", []):members = inspect.getmembers(handler)for member_name, member in members:yield Members(logger_name, handler, member_name, member)", "docstring": "iterate through the attributes of every logger's handler\n\n this is used to switch out stderr and stdout in tests when buffer is True\n\n :returns: generator of tuples, each tuple has (name, handler, member_name, member_val)", "id": "f5415:m4"} {"signature": "def _find_by_path_local(self, path):", "body": "return glob('' % path)", "docstring": "Finds files by globbing on the local filesystem.", "id": "f5421:c0:m9"} {"signature": "def delete(self, filename, storage_type=None, bucket_name=None):", "body": "if not (storage_type and bucket_name):self._delete_local(filename)else:if storage_type != '':raise ValueError('' % storage_type)self._delete_s3(filename, bucket_name)", "docstring": "Deletes the specified file, either locally or from S3, depending on the file's storage type.", "id": "f5421:c0:m5"} {"signature": "def _save_local(self, temp_file, filename, obj):", "body": "path = self._get_path(filename)if not os.path.exists(os.path.dirname(path)):os.makedirs(os.path.dirname(path), self.permission | )fd = open(path, '')temp_file.seek()t = temp_file.read()while t:fd.write(t)t = temp_file.read()fd.close()if self.filesize_field:setattr(obj, self.filesize_field, os.path.getsize(path))return filename", "docstring": "Saves the specified file to the local file system.", "id": "f5421:c0:m6"} {"signature": "def _delete_s3(self, filename, bucket_name):", "body": "conn = S3Connection(self.access_key_id, self.access_key_secret)bucket = conn.get_bucket(bucket_name)if type(filename).__name__ == '':filename = '' + filename.namepath = self._get_s3_path(filename)k = Key(bucket)k.key = pathtry:bucket.delete_key(k)except S3ResponseError:pass", "docstring": "Deletes the specified file from the given S3 bucket.", "id": "f5421:c0:m4"} {"signature": "def parse_interfaces(interfaces):", "body": "parsed_interfaces = collections.defaultdict(dict)for 
m, d in iteritems(interfaces):app, func = m.split('', )method = parsed_interfaces[app][func] = {}method[''] = ['', '']method[''] = ''method[''] = {}method[''] = {}for name, type_info in iteritems(dict(d[''])):optionality = ''param_type = ''type_info = TYPE_INFO_COMMENT_RE.sub('', type_info)info_pieces = TYPE_INFO_SPLITTER_RE.findall(type_info)for info_piece in info_pieces:if info_piece in ('', ''):optionality = info_pieceelif info_piece == '':optionality = ''param_type = ''elif info_piece == '':optionality = ''elif info_piece == '':optionality = ''else:param_type = info_piecemethod[optionality][name] = map_param_type(param_type)return dict(parsed_interfaces)", "docstring": "Parse the conduit.query json dict response\nThis performs the logic of parsing the non-standard params dict\n and then returning a dict Resource can understand", "id": "f5425:m1"} {"signature": "def get_version(package):", "body": "init_py = open(os.path.join(package, '')).read()return re.search(\"\",init_py, re.MULTILINE).group()", "docstring": "Return package version as listed in `__version__` in `init.py`.", "id": "f5428:m0"} {"signature": "@cli.command()@click.argument('')@click.pass_contextdef retrieve(ctx, preview_id, *args, **kwargs):", "body": "file_previews = ctx.obj['']results = file_previews.retrieve(preview_id)click.echo(results)", "docstring": "Retreive preview results for ID.", "id": "f5430:m2"} {"signature": "def _run_ssh(entries, username, idfile, no_prompt=False, command=None,show=None, only=None, sort_by=None, limit=None, tunnel=None,num=None, random=False):", "body": "_print_entries = num is None_print_help = Falseif len(entries) == :exit('')if no_prompt is True and command is not None:return _run_ssh_command(entries, username, idfile, command, tunnel)elif len(entries) == or random is True:entry = py_random.choice(entries)if command is None:return _connect_ssh(entry, username, idfile, tunnel)else:return _run_ssh_command([entry], username, idfile, command, tunnel)elif command is not None:print(HostEntry.render_entries(entries,additional_columns=show,only_show=only, numbers=True))if no_prompt is False:get_input(\"\"\"\".format(cyan(command), len(entries)))return _run_ssh_command(entries, username, idfile, command, tunnel)else:while True:if sort_by is not None:entries = HostEntry.sort_by(entries, sort_by)if limit is not None:entries = entries[:limit]if _print_entries is True:print(HostEntry.render_entries(entries,additional_columns=show,only_show=only, numbers=True))print('' % len(entries))_print_entries = Falseif _print_help is True:cmd_str = green(command) if command is not None else ''msg = COMMANDS_STRING.format(username=username or '',idfile=idfile or '',cur_cmd=cmd_str)print(msg)_print_help = Falseelif command is not None:print('' % cyan(command))msg = '' % (cyan(''),cyan(''))if num is not None:choice = numelse:choice = get_input(msg)if isinstance(choice, int):if <= choice <= len(entries):breakelse:if num is not None:exit(\"\".format(num, len(entries) - ))else:msg = \"\"print(msg % (len(entries) - ))elif choice == '':if command is None:print('')else:return _run_ssh_command(entries, username, idfile,command, tunnel)elif choice == '':_print_help = Trueelif choice in ['', '', '']:print('')returnelse:commands = choice.split()if len(commands) < :print(yellow('' % choice))else:cmd = commands[]if cmd in ['', '', '']:if cmd == '':username = commands[]elif cmd == '':_idfile = commands[]if not os.path.exists(_idfile):print(yellow('' % _idfile))continueidfile = _idfileelif cmd == '':p = commands[]try:profile 
= LsiProfile.load(p)_username = profile.username_idfile = expanduser(profile.identity_file)except LsiProfile.LoadError:print(yellow('' % repr(p)))continueusername = _usernameidfile = _idfileprint('' % green(repr(username)))print('' % green(repr(idfile)))elif cmd == '':entries = filter_entries(entries, commands[:], [])_print_entries = Trueelif cmd == '':entries = filter_entries(entries, [], commands[:])_print_entries = Trueelif cmd == '':command = ''.join(commands[:])elif cmd == '':try:limit = int(commands[])_print_entries = Trueexcept ValueError:print(yellow(''))elif cmd == '':sort_by = commands[]if sort_by not in show:show.append(sort_by)_print_entries = Trueelif cmd == '':if show is None:show = commands[:]else:show.extend(commands[:])_print_entries = Trueelse:print(yellow('' % cmd))return _connect_ssh(entries[choice], username, idfile, tunnel)", "docstring": "Lets the user choose which instance to SSH into.\n\n:param entries: The list of host entries.\n:type entries: [:py:class:`HostEntry`]\n:param username: The SSH username to use. Defaults to current user.\n:type username: ``str`` or ``NoneType``\n:param idfile: The identity file to use. Optional.\n:type idfile: ``str`` or ``NoneType``\n:param no_prompt: Whether to disable confirmation for SSH command.\n:type no_prompt: ``bool``\n:param command: SSH command to run on matching instances.\n:type command: ``str`` or ``NoneType``\n:param show: Instance attributes to show in addition to defaults.\n:type show: ``NoneType`` or ``list`` of ``str``\n:param only: If not ``None``, will *only* show these attributes.\n:type only: ``NoneType`` or ``list`` of ``str``\n:param sort_by: What to sort columns by. By default, sort by 'name'.\n:type sort_by: ``str``\n:param limit: At most how many results to show.\n:type limit: ``int`` or ``NoneType``\n:param num: If not None, choose the given entry from within filters.\n:type num: ``int`` or ``NoneType``\n:param random: If true, choose a random entry from within filters.\n:type random: ``bool``", "id": "f5436:m0"} {"signature": "def _run_ssh_command(entries, username, idfile, command, tunnel,parallel=False):", "body": "if len(entries) == :print('')return if command.strip() == '' or command is None:raise ValueError('')print(''.format(green(repr(command)), len(entries)))shell_cmds = []for entry in entries:hname = entry.hostname or entry.public_ipcmd = _build_ssh_command(hname, username, idfile, command, tunnel)shell_cmds.append({'': cmd,'': entry.display()})stream_commands(shell_cmds, parallel=parallel)print(green(''))", "docstring": "Runs the given command over SSH in parallel on all hosts in `entries`.\n\n:param entries: The host entries the hostnames from.\n:type entries: ``list`` of :py:class:`HostEntry`\n:param username: To use a specific username.\n:type username: ``str`` or ``NoneType``\n:param idfile: The SSH identity file to use, or none.\n:type idfile: ``str`` or ``NoneType``\n:param command: The command to run.\n:type command: ``str``\n:param parallel: If true, commands will be run in parallel.\n:type parallel: ``bool``", "id": "f5436:m6"} {"signature": "def _build_ssh_command(hostname, username, idfile, ssh_command, tunnel):", "body": "command = [_get_path(''),'', '','', '']if idfile is not None:command.extend(['', idfile])if tunnel is not None:command.extend(['', '', tunnel, '', '', ''])if username is not None:command.append(''.format(username, hostname))else:command.append(hostname)if ssh_command is not None:command.append(repr(ssh_command))return(''.join(command))", "docstring": "Uses hostname 
and other info to construct an SSH command.", "id": "f5436:m2"} {"signature": "def _get_args():", "body": "parser = argparse.ArgumentParser(description='')parser.add_argument('', '', action='', default=False,help='')parser.add_argument('', action='', default=False,help='')parser.add_argument('', action='', default=False,help='')parser.add_argument('', help='',default=None)parser.add_argument('', '', action='',help='', default=False)parser.add_argument('', '', help='',default=None)parser.add_argument('', '', default=None,help='')parser.add_argument('', nargs='',help='')parser.add_argument('', '', nargs='',help='')parser.add_argument('', '', type=str,help='')parser.add_argument('', '', action='', default=False,help=\"\"\"\")parser.add_argument('', '', type=str,help='')parser.add_argument('', nargs='', default=None,help='')parser.add_argument('', nargs='', default=None,help='')parser.add_argument('', type=str, default=None,help='')parser.add_argument('', type=str, default=\"\",help='')parser.add_argument('', '', type=int, default=None,help='')parser.add_argument('', action='',help='')parser.add_argument('', nargs=, default=None,help='''')parser.add_argument('', nargs=, default=None,help='''')parser.add_argument('', '', default=None,help='')parser.add_argument(\"\", \"\", action=\"\", default=False,help=\"\")parser.add_argument(\"\", \"\", type=int, default=None,help=\"\")args = parser.parse_args()if args.exclude is None:args.exclude = []if args.sort_by is not None:args.show = (args.show or []) + [args.sort_by]return args", "docstring": "Parse command-line arguments.", "id": "f5436:m9"} {"signature": "@classmethoddef load(cls, profile_name=None):", "body": "lsi_location = os.path.expanduser('')if not os.path.exists(lsi_location):return LsiProfile()cfg_parser = ConfigParser()cfg_parser.read(lsi_location)if profile_name is None:if cfg_parser.has_section(''):profile_name = ''else:return cls()elif not cfg_parser.has_section(profile_name):raise cls.LoadError(''.format(profile_name))def _get(option, alt=None):\"\"\"\"\"\"if cfg_parser.has_option(profile_name, option):return cfg_parser.get(profile_name, option)else:return altif cfg_parser.has_option(profile_name, ''):profile = cls.load(cfg_parser.get(profile_name, ''))else:profile = cls()profile.override('', _get(''))profile.override('', _get(''))profile.override('', _get(''))filters = [s for s in _get('', '').split('') if len(s) > ]exclude = [s for s in _get('', '').split('') if len(s) > ]profile.filters.extend(filters)profile.exclude.extend(exclude)return profile", "docstring": "Loads the user's LSI profile, or provides a default.", "id": "f5436:c0:m2"} {"signature": "def prepare_rows(table):", "body": "num_columns = max(len(row) for row in table)for row in table:while len(row) < num_columns:row.append('')for i in range(num_columns):row[i] = str(row[i]) if row[i] is not None else ''return table", "docstring": "Prepare the rows so they're all strings, and all the same length.\n\n:param table: A 2D grid of anything.\n:type table: [[``object``]]\n\n:return: A table of strings, where every row is the same length.\n:rtype: [[``str``]]", "id": "f5437:m4"} {"signature": "def render_row(num, columns, widths, column_colors=None):", "body": "row_str = ''cell_strs = []for i, column in enumerate(columns):try:cell = column[num]spaces = '' * (widths[i] - len(cell))if column_colors is not None and column_colors[i] is not None:cell = column_colors[i](cell)cell_strs.append('' % (cell, spaces))except IndexError:cell_strs.append('' * (widths[i] + ))return '' % 
''.join(cell_strs)", "docstring": "Render the `num`th row of each column in `columns`.\n\n:param num: Which row to render.\n:type num: ``int``\n:param columns: The list of columns.\n:type columns: [[``str``]]\n:param widths: The widths of each column.\n:type widths: [``int``]\n:param column_colors: An optional list of coloring functions.\n:type column_colors: [``str`` -> ``str``] or ``NoneType``\n\n:return: The rendered row.\n:rtype: ``str``", "id": "f5437:m1"} {"signature": "def transpose_table(table):", "body": "if len(table) == :return tableelse:num_columns = len(table[])return [[row[i] for row in table] for i in range(num_columns)]", "docstring": "Transposes a table, turning rows into columns.\n:param table: A 2D string grid.\n:type table: [[``str``]]\n\n:return: The same table, with rows and columns flipped.\n:rtype: [[``str``]]", "id": "f5437:m3"} {"signature": "def get_current_terminal_width():", "body": "try:return int(os.popen('', '').read().split()[])except Exception:return ", "docstring": "Returns the current terminal size, in characters.\n:rtype: ``int``", "id": "f5439:m4"} {"signature": "def get_host(name):", "body": "f = {'': '', '': name}ec2 = boto.connect_ec2(region=get_region())rs = ec2.get_all_instances(filters=f)if len(rs) == :raise Exception('' % name)print(rs[].instances[].public_dns_name)", "docstring": "Prints the public dns name of `name`, if it exists.\n\n:param name: The instance name.\n:type name: ``str``", "id": "f5441:m9"} {"signature": "def _list_all_cached():", "body": "with open(get_cache_location()) as f:contents = f.read()objects = json.loads(contents)return [HostEntry.from_dict(obj) for obj in objects]", "docstring": "Reads the description cache, returning each instance's information.\n:return: A list of host entries.\n:rtype: [:py:class:`HostEntry`]", "id": "f5441:m7"} {"signature": "@classmethoddef prettyname(cls, attrib_name):", "body": "if attrib_name.startswith(''):tagname = attrib_name[len(''):]return ''.format(tagname)elif attrib_name in cls.COLUMN_NAMES:return cls.COLUMN_NAMES[attrib_name]else:return attrib_name", "docstring": "Returns the \"pretty name\" (capitalized, etc) of an attribute, by\nlooking it up in ``cls.COLUMN_NAMES`` if it exists there.\n\n:param attrib_name: An attribute name.\n:type attrib_name: ``str``\n\n:rtype: ``str``", "id": "f5441:c0:m13"} {"signature": "@classmethoddef from_boto_instance(cls, instance):", "body": "return cls(name=instance.tags.get(''),private_ip=instance.private_ip_address,public_ip=instance.ip_address,instance_type=instance.instance_type,instance_id=instance.id,hostname=instance.dns_name,stack_id=instance.tags.get(''),stack_name=instance.tags.get(''),logical_id=instance.tags.get(''),security_groups=[g.name for g in instance.groups],launch_time=instance.launch_time,ami_id=instance.image_id,tags={k.lower(): v for k, v in six.iteritems(instance.tags)})", "docstring": "Loads a ``HostEntry`` from a boto instance.\n\n:param instance: A boto instance object.\n:type instance: :py:class:`boto.ec2.instanceInstance`\n\n:rtype: :py:class:`HostEntry`", "id": "f5441:c0:m10"} {"signature": "def format_string(self, fmat_string):", "body": "try:return fmat_string.format(**vars(self))except KeyError as e:raise ValueError(''''.format(repr(fmat_string),repr(e)))", "docstring": "Takes a string containing 0 or more {variables} and formats it\naccording to this instance's attributes.\n\n:param fmat_string: A string, e.g. '{name}-foo.txt'\n:type fmat_string: ``str``\n\n:return: The string formatted according to this instance. 
E.g.\n 'production-runtime-foo.txt'\n:rtype: ``str``", "id": "f5441:c0:m14"} {"signature": "def get_cache_location():", "body": "region = get_region()return join(CACHE_DIRECTORY, region.name + \"\")", "docstring": "Use the region name to get the location of the cache JSON file.", "id": "f5441:m4"} {"signature": "def display(self):", "body": "if isinstance(self.name, six.string_types) and len(self.name) > :return ''.format(self.name, self.public_ip)else:return self.public_ip", "docstring": "Returns the best name to display for this host. Uses the instance\nname if available; else just the public IP.\n\n:rtype: ``str``", "id": "f5441:c0:m12"} {"signature": "@classmethoddef list_attributes(cls):", "body": "fake_args = [None for _ in inspect.getargspec(cls.__init__).args[:]]fake_instance = cls(*fake_args)return vars(fake_instance).keys()", "docstring": "Lists all of the attributes to be found on an instance of this class.\nIt creates a \"fake instance\" by passing in `None` to all of the\n``__init__`` arguments, then returns all of the attributes of that\ninstance.\n\n:return: A list of instance attributes of this class.\n:rtype: ``list`` of ``str``", "id": "f5441:c0:m7"} {"signature": "def _uniquify(_list):", "body": "seen = set()result = []for x in _list:if x not in seen:result.append(x)seen.add(x)return result", "docstring": "Remove duplicates in a list.", "id": "f5441:m0"} {"signature": "def create(self, path):", "body": "fpath = os.path.join(self.tempdir, path)if not os.path.exists(os.path.dirname(fpath)):os.makedirs(os.path.dirname(fpath))open(fpath, '').close()return fpath", "docstring": "Create an empty file in the temporary directory, return the full path.", "id": "f5457:c0:m0"} {"signature": "def getint(self, section, option):", "body": "try:return super(BugwarriorConfigParser, self).getint(section, option)except ValueError:if self.get(section, option) == u'':return Noneelse:raise ValueError(\"\".format(section=section, option=option))", "docstring": "Accepts both integers and empty values.", "id": "f5468:c0:m0"} {"signature": "def get_config_path():", "body": "if os.environ.get(BUGWARRIORRC):return os.environ[BUGWARRIORRC]xdg_config_home = (os.environ.get('') or os.path.expanduser(''))xdg_config_dirs = ((os.environ.get('') or '').split(''))paths = [os.path.join(xdg_config_home, '', ''),os.path.expanduser(\"\")]paths += [os.path.join(d, '', '') for d in xdg_config_dirs]for path in paths:if os.path.exists(path):return pathreturn paths[]", "docstring": "Determine the path to the config file. 
This will return, in this order of\nprecedence:\n- the value of $BUGWARRIORRC if set\n- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc if exists\n- ~/.bugwarriorrc if exists\n- /bugwarrior/bugwarriorc if exists, for dir in $XDG_CONFIG_DIRS\n- $XDG_CONFIG_HOME/bugwarrior/bugwarriorc otherwise", "id": "f5468:m9"} {"signature": "def get_service_password(service, username, oracle=None, interactive=False):", "body": "import getpasspassword = Noneif not oracle or oracle == \"\":keyring = get_keyring()password = keyring.get_password(service, username)if interactive and password is None:oracle = \"\"password = get_service_password(service, username,oracle, interactive=True)if password:keyring.set_password(service, username, password)elif interactive and oracle == \"\":prompt = \"\" % servicepassword = getpass.getpass(prompt)elif oracle.startswith(''):command = oracle[:]return oracle_eval(command)if password is None:die(\"\" %(oracle, interactive, service))return password", "docstring": "Retrieve the sensitive password for a service by:\n\n * retrieving password from a secure store (@oracle:use_keyring, default)\n * asking the password from the user (@oracle:ask_password, interactive)\n * executing a command and use the output as password\n (@oracle:eval:)\n\nNote that the keyring may or may not be locked\nwhich requires that the user provides a password (interactive mode).\n\n:param service: Service name, may be key into secure store (as string).\n:param username: Username for the service (as string).\n:param oracle: Hint which password oracle strategy to use.\n:return: Retrieved password (as string)\n\n.. seealso::\n https://bitbucket.org/kang/python-keyring-lib", "id": "f5468:m4"} {"signature": "def __contains__(self, key):", "body": "return self.config_parser.has_option(self.service_target, self._get_key(key))", "docstring": "Does service section specify this option?", "id": "f5468:c1:m2"} {"signature": "def __getattr__(self, name):", "body": "return getattr(self.config_parser, name)", "docstring": "Proxy undefined attributes/methods to ConfigParser object.", "id": "f5468:c1:m1"} {"signature": "def build_uda_config_overrides(targets):", "body": "from bugwarrior.services import get_servicetargets_udas = {}for target in targets:targets_udas.update(get_service(target).ISSUE_CLASS.UDAS)return {'': targets_udas}", "docstring": "Returns a list of UDAs defined by given targets\n\n For all targets in `targets`, build a dictionary of configuration overrides\n representing the UDAs defined by the passed-in services (`targets`).\n\n Given a hypothetical situation in which you have two services, the first\n of which defining a UDA named 'serviceAid' (\"Service A ID\", string) and\n a second service defining two UDAs named 'serviceBproject'\n (\"Service B Project\", string) and 'serviceBnumber'\n (\"Service B Number\", numeric), this would return the following structure::\n\n {\n 'uda': {\n 'serviceAid': {\n 'label': 'Service A ID',\n 'type': 'string',\n },\n 'serviceBproject': {\n 'label': 'Service B Project',\n 'type': 'string',\n },\n 'serviceBnumber': {\n 'label': 'Service B Number',\n 'type': 'numeric',\n }\n }\n }", "id": "f5473:m10"} {"signature": "def api_request(self, url, **params):", "body": "params[''] = self.config.get(''),params[''] = self.config.get(''),url = \"\" + urlreturn self.json_response(requests.get(url, params=params))", "docstring": "Make a trello API request. 
This takes an absolute url (without protocol\nand host) and a list of argumnets and return a GET request with the\nkey and token from the configuration", "id": "f5480:c1:m8"} {"signature": "def get_cards(self, list_id):", "body": "params = {'': ''}member = self.config.get('', None)unassigned = self.config.get('', False, asbool)if member is not None:params[''] = ''params[''] = ''cards = self.api_request(\"\".format(list_id=list_id),**params)for card in cards:if (member is Noneor member in [m[''] for m in card['']]or (unassigned and not card[''])):yield card", "docstring": "Returns an iterator for the cards in a given list, filtered\n according to configuration values of trello.only_if_assigned and\n trello.also_unassigned", "id": "f5480:c1:m6"} {"signature": "def get_query(self, query):", "body": "url = self._api_url(\"\", query=query)return self._getter(url, subkey='')", "docstring": "Run a generic issue/PR query", "id": "f5482:c0:m3"} {"signature": "@staticmethoddef _link_field_to_dict(field):", "body": "if not field:return dict()return dict([(part.split('')[][:-],part.split('')[][:-],) for part in field.split('')])", "docstring": "Utility for ripping apart github's Link header field.\n It's kind of ugly.", "id": "f5482:c0:m9"} {"signature": "def _api_url(self, path, **context):", "body": "if self.host == '':baseurl = \"\"else:baseurl = \"\".format(self.host)return baseurl + path.format(**context)", "docstring": "Build the full url to the API endpoint", "id": "f5482:c0:m1"} {"signature": "def get_author(self, issue):", "body": "raise NotImplementedError()", "docstring": "Override this for filtering on tickets", "id": "f5485:c0:m10"} {"signature": "def _aggregate_issues(conf, main_section, target, queue, service_name):", "body": "start = time.time()try:service = get_service(service_name)(conf, main_section, target)issue_count = for issue in service.issues():queue.put(issue)issue_count += except SystemExit as e:log.critical(str(e))queue.put((SERVICE_FINISHED_ERROR, (target, e)))except BaseException as e:if hasattr(e, '') and e.request:e.request.hooks = {}log.exception(\"\" % (target, e))queue.put((SERVICE_FINISHED_ERROR, (target, e)))else:queue.put((SERVICE_FINISHED_OK, (target, issue_count, )))finally:duration = time.time() - startlog.info(\"\" % (target, duration))", "docstring": "This worker function is separated out from the main\n :func:`aggregate_issues` func only so that we can use multiprocessing\n on it for speed reasons.", "id": "f5485:m1"} {"signature": "def to_taskwarrior(self):", "body": "raise NotImplementedError()", "docstring": "Transform a foreign record into a taskwarrior dictionary.", "id": "f5485:c1:m2"} {"signature": "def aggregate_issues(conf, main_section, debug):", "body": "log.info(\"\")targets = aslist(conf.get(main_section, ''))queue = multiprocessing.Queue()log.info(\"\" % len(targets))processes = []if debug:for target in targets:_aggregate_issues(conf,main_section,target,queue,conf.get(target, ''))else:for target in targets:proc = multiprocessing.Process(target=_aggregate_issues,args=(conf, main_section, target, queue, conf.get(target, '')))proc.start()processes.append(proc)time.sleep()currently_running = len(targets)while currently_running > :issue = queue.get(True)if isinstance(issue, tuple):completion_type, args = issueif completion_type == SERVICE_FINISHED_ERROR:target, e = argslog.info(\"\")for process in processes:process.terminate()raise RuntimeError(\"\".format(target))currently_running -= continueyield issuelog.info(\"\")", "docstring": "Return all 
issues from every target.", "id": "f5485:m2"} {"signature": "def issues(self):", "body": "raise NotImplementedError()", "docstring": "Returns a list of dicts representing issues from a remote service.\n\n This is the main place to begin if you are implementing a new service\n for bugwarrior. Override this to gather issues for each service.\n\n Each item in the list should be a dict that looks something like this:\n\n {\n \"description\": \"Some description of the issue\",\n \"project\": \"some_project\",\n \"priority\": \"H\",\n \"annotations\": [\n \"This is an annotation\",\n \"This is another annotation\",\n ]\n }\n\n\n The description can be 'anything' but must be consistent and unique for\n issues you're pulling from a remote service. You can and should use\n the ``.description(...)`` method to help format your descriptions.\n\n The project should be a string and may be anything you like.\n\n The priority should be one of \"H\", \"M\", or \"L\".", "id": "f5485:c0:m11"} {"signature": "@classmethoddef validate_config(cls, service_config, target):", "body": "if service_config.has_option(target, ''):die(\"\"\"\" % (target, cls.CONFIG_PREFIX))if service_config.has_option(target, ''):die(\"\"\"\" % (target, cls.CONFIG_PREFIX))if service_config.has_option(target, ''):die(\"\"\"\" % (target, cls.CONFIG_PREFIX))if service_config.has_option(target, ''):die(\"\"\"\" % (target, cls.CONFIG_PREFIX))", "docstring": "Validate generic options for a particular target", "id": "f5485:c0:m7"} {"signature": "def get_issue_generator(self, user_id, project_id, project_name):", "body": "user_tasks_data = self.call_api(\"\" + six.text_type(project_id) + \"\")for key, task in enumerate(user_tasks_data):assigned_task = self.get_task_dict(project_id, key, task)if assigned_task:log.debug(\"\" + assigned_task[''] +\"\")yield assigned_task", "docstring": "Approach:\n\n1. Get user ID from bugwarriorrc file\n2. Get list of tickets from /user-tasks for a given project\n3. For each ticket/task returned from #2, get ticket/task info and\n check if logged-in user is primary (look at `is_owner` and\n `user_id`)", "id": "f5486:c0:m2"} {"signature": "def copy(self):", "body": "return ObliviousCookieJar()", "docstring": "Make sure to return an instance of the correct class on copying.", "id": "f5492:c0:m1"} {"signature": "def get_instance(self, data):", "body": "if self.transient:return Noneprops = get_primary_keys(self.opts.model)filters = {prop.key: data.get(prop.key) for prop in props}if None not in filters.values():return self.session.query(self.opts.model).filter_by(**filters).first()return None", "docstring": "Retrieve an existing record by primary key(s). If the schema instance\n is transient, return None.\n\n :param data: Serialized data to inform lookup.", "id": "f5499:c6:m5"} {"signature": "@ma.post_loaddef make_instance(self, data):", "body": "instance = self.instance or self.get_instance(data)if instance is not None:for key, value in iteritems(data):setattr(instance, key, value)return instancekwargs, association_attrs = self._split_model_kwargs_association(data)instance = self.opts.model(**kwargs)for attr, value in iteritems(association_attrs):setattr(instance, attr, value)return instance", "docstring": "Deserialize data to an instance of the model. 
Update an existing row\n if specified in `self.instance` or loaded by primary key(s) in the data;\n else create a new row.\n\n :param data: Data to deserialize.", "id": "f5499:c6:m6"} {"signature": "def add_value(self, name, value):", "body": "try:if self._rfc_values[name] is None:self._rfc_values[name] = valueelif self.strict:if name in ('', ''):raise errors.MalformedLinkValue(''.format(name))returnexcept KeyError:passif self.strict and name in ('', ''):returnself._values.append((name, value))", "docstring": "Add a new value to the list.\n\n:param str name: name of the value that is being parsed\n:param str value: value that is being parsed\n:raises ietfparse.errors.MalformedLinkValue:\n if *strict mode* is enabled and a validation error\n is detected\n\nThis method implements most of the validation mentioned in\nsections 5.3 and 5.4 of :rfc:`5988`. The ``_rfc_values``\ndictionary contains the appropriate values for the attributes\nthat get special handling. If *strict mode* is enabled, then\nonly values that are acceptable will be added to ``_values``.", "id": "f5516:c0:m1"} {"signature": "def parse_content_type(content_type, normalize_parameter_values=True):", "body": "parts = _remove_comments(content_type).split('')content_type, content_subtype = parts.pop().split('')if '' in content_subtype:content_subtype, content_suffix = content_subtype.split('')else:content_suffix = Noneparameters = _parse_parameter_list(parts, normalize_parameter_values=normalize_parameter_values)return datastructures.ContentType(content_type, content_subtype,dict(parameters),content_suffix)", "docstring": "Parse a content type like header.\n\n :param str content_type: the string to parse as a content type\n :param bool normalize_parameter_values:\n setting this to ``False`` will enable strict RFC2045 compliance\n in which content parameter values are case preserving.\n :return: a :class:`~ietfparse.datastructures.ContentType` instance", "id": "f5518:m5"} {"signature": "def parse_accept_encoding(header_value):", "body": "return _parse_qualified_list(header_value)", "docstring": "Parse the ``Accept-Encoding`` header into a sorted list.\n\n:param str header_value: header value to parse\n\n:return: list of encodings sorted from highest to lowest priority\n\nThe `Accept-Encoding`_ header is a list of encodings with\noptional *quality* values. The quality value indicates the strength\nof the preference where 1.0 is a strong preference and less than 0.001\nis outright rejection by the client.\n\n.. note::\n\n Encodings that are rejected by setting the quality value\n to less than 0.001. If a wildcard is included in the header,\n then it will appear **BEFORE** values that are rejected.\n\n.. 
_Accept-Encoding: https://tools.ietf.org/html/rfc7231#section-5.3.4", "id": "f5518:m2"} {"signature": "def parse_link(header_value, strict=True):", "body": "sanitized = _remove_comments(header_value)links = []def parse_links(buf):\"\"\"\"\"\"quoted = re.findall('', buf)for segment in quoted:left, match, right = buf.partition(segment)match = match.replace('', '')match = match.replace('', '')buf = ''.join([left, match, right])while buf:matched = re.match(r'', buf)if matched:groups = matched.groupdict()params, _, buf = groups[''].partition('')params = params.replace('', '') if params and not params.startswith(''):raise errors.MalformedLinkValue('')yield (groups[''].strip(),[p.replace('', '').strip()for p in params[:].split('') if p])buf = buf.strip()else:raise errors.MalformedLinkValue('', buf)for target, param_list in parse_links(sanitized):parser = _helpers.ParameterParser(strict=strict)for name, value in _parse_parameter_list(param_list):parser.add_value(name, value)links.append(datastructures.LinkHeader(target=target,parameters=parser.values))return links", "docstring": "Parse a HTTP Link header.\n\n:param str header_value: the header value to parse\n:param bool strict: set this to ``False`` to disable semantic\n checking. Syntactical errors will still raise an exception.\n Use this if you want to receive all parameters.\n:return: a sequence of :class:`~ietfparse.datastructures.LinkHeader`\n instances\n:raises ietfparse.errors.MalformedLinkValue:\n if the specified `header_value` cannot be parsed", "id": "f5518:m7"} {"signature": "def parse_cache_control(header_value):", "body": "directives = {}for segment in parse_list(header_value):name, sep, value = segment.partition('')if sep != '':directives[name] = Noneelif sep and value:value = _dequote(value.strip())try:directives[name] = int(value)except ValueError:directives[name] = valuefor name in _CACHE_CONTROL_BOOL_DIRECTIVES:if directives.get(name, '') is None:directives[name] = Truereturn directives", "docstring": "Parse a `Cache-Control`_ header, returning a dictionary of key-value pairs.\n\nAny of the ``Cache-Control`` parameters that do not have directives, such\nas ``public`` or ``no-cache`` will be returned with a value of ``True``\nif they are set in the header.\n\n:param str header_value: ``Cache-Control`` header value to parse\n:return: the parsed ``Cache-Control`` header values\n:rtype: dict\n\n.. _Cache-Control: https://tools.ietf.org/html/rfc7234#section-5.2", "id": "f5518:m4"} {"signature": "def parse_http_accept_header(header_value):", "body": "warnings.warn(\"\", DeprecationWarning)return parse_accept(header_value)", "docstring": "Parse an HTTP accept-like header.\n\n :param str header_value: the header value to parse\n :return: a :class:`list` of :class:`.ContentType` instances\n in decreasing quality order. Each instance is augmented\n with the associated quality as a ``float`` property\n named ``quality``.\n\n ``Accept`` is a class of headers that contain a list of values\n and an associated preference value. The ever present `Accept`_\n header is a perfect example. It is a list of content types and\n an optional parameter named ``q`` that indicates the relative\n weight of a particular type. The most basic example is::\n\n Accept: audio/*;q=0.2, audio/basic\n\n Which states that I prefer the ``audio/basic`` content type\n but will accept other ``audio`` sub-types with an 80% mark down.\n\n .. _Accept: http://tools.ietf.org/html/rfc7231#section-5.3.2\n\n .. 
deprecated:: 1.3.0\n Use :func:`~ietfparse.headers.parse_accept` instead.", "id": "f5518:m13"} {"signature": "def parse_accept(header_value):", "body": "next_explicit_q = decimal.ExtendedContext.next_plus(decimal.Decimal(''))headers = [parse_content_type(header)for header in parse_list(header_value)]for header in headers:q = header.parameters.pop('', None)if q is None:q = ''elif float(q) == :q = float(next_explicit_q)next_explicit_q = next_explicit_q.next_minus()header.quality = float(q)def ordering(left, right):\"\"\"\"\"\"if left.quality != right.quality:return right.quality - left.qualityif left == right:return if left > right:return -return return sorted(headers, key=functools.cmp_to_key(ordering))", "docstring": "Parse an HTTP accept-like header.\n\n :param str header_value: the header value to parse\n :return: a :class:`list` of :class:`.ContentType` instances\n in decreasing quality order. Each instance is augmented\n with the associated quality as a ``float`` property\n named ``quality``.\n\n ``Accept`` is a class of headers that contain a list of values\n and an associated preference value. The ever present `Accept`_\n header is a perfect example. It is a list of content types and\n an optional parameter named ``q`` that indicates the relative\n weight of a particular type. The most basic example is::\n\n Accept: audio/*;q=0.2, audio/basic\n\n Which states that I prefer the ``audio/basic`` content type\n but will accept other ``audio`` sub-types with an 80% mark down.\n\n .. _Accept: http://tools.ietf.org/html/rfc7231#section-5.3.2", "id": "f5518:m0"} {"signature": "def parse_link_header(header_value, strict=True):", "body": "warnings.warn(\"\", DeprecationWarning)return parse_link(header_value, strict)", "docstring": "Parse a HTTP Link header.\n\n:param str header_value: the header value to parse\n:param bool strict: set this to ``False`` to disable semantic\n checking. Syntactical errors will still raise an exception.\n Use this if you want to receive all parameters.\n:return: a sequence of :class:`~ietfparse.datastructures.LinkHeader`\n instances\n:raises ietfparse.errors.MalformedLinkValue:\n if the specified `header_value` cannot be parsed\n\n.. 
deprecated:: 1.3.0\n Use :func:`~ietfparse.headers.parse_link` instead.", "id": "f5518:m14"} {"signature": "def select_content_type(requested, available):", "body": "class Match(object):\"\"\"\"\"\"WILDCARD, PARTIAL, FULL_TYPE, = , , def __init__(self, candidate, pattern):self.candidate = candidateself.pattern = patternif pattern.content_type == pattern.content_subtype == '':self.match_type = self.WILDCARDelif pattern.content_subtype == '':self.match_type = self.PARTIALelse:self.match_type = self.FULL_TYPEself.parameter_distance = len(self.candidate.parameters)for key, value in candidate.parameters.items():if key in pattern.parameters:if pattern.parameters[key] == value:self.parameter_distance -= else:self.parameter_distance += def extract_quality(obj):return getattr(obj, '', )matches = []for pattern in sorted(requested, key=extract_quality, reverse=True):for candidate in sorted(available):if _content_type_matches(candidate, pattern):if candidate == pattern: if extract_quality(pattern) == :raise errors.NoMatch return candidate, patternmatches.append(Match(candidate, pattern))if not matches:raise errors.NoMatchmatches = sorted(matches,key=attrgetter('', ''))return matches[].candidate, matches[].pattern", "docstring": "Selects the best content type.\n\n :param requested: a sequence of :class:`.ContentType` instances\n :param available: a sequence of :class:`.ContentType` instances\n that the server is capable of producing\n\n :returns: the selected content type (from ``available``) and the\n pattern that it matched (from ``requested``)\n :rtype: :class:`tuple` of :class:`.ContentType` instances\n :raises: :class:`.NoMatch` when a suitable match was not found\n\n This function implements the *Proactive Content Negotiation*\n algorithm as described in sections 3.4.1 and 5.3 of :rfc:`7231`.\n The input is the `Accept`_ header as parsed by\n :func:`.parse_http_accept_header` and a list of\n parsed :class:`.ContentType` instances. The ``available`` sequence\n should be a sequence of content types that the server is capable of\n producing. The selected value should ultimately be used as the\n `Content-Type`_ header in the generated response.\n\n .. _Accept: http://tools.ietf.org/html/rfc7231#section-5.3.2\n .. _Content-Type: http://tools.ietf.org/html/rfc7231#section-3.1.1.5", "id": "f5521:m1"} {"signature": "def _normalize_host(host, enable_long_host=False, encode_with_idna=None,scheme=None):", "body": "if encode_with_idna is not None:enable_idna = encode_with_idnaelse:enable_idna = scheme.lower() in IDNA_SCHEMES if scheme else Falseif enable_idna:try:host = ''.join(segment.encode('').decode()for segment in host.split(''))except UnicodeError as exc:raise ValueError(''.format(exc))else:host = parse.quote(host.encode(''), safe=HOST_SAFE_CHARS)if len(host) > and not enable_long_host:raise ValueError('')return host", "docstring": "Normalize a host for a URL.\n\n:param str host: the host name to normalize\n\n:keyword bool enable_long_host: if this keyword is specified\n and it is :data:`True`, then the host name length restriction\n from :rfc:`3986#section-3.2.2` is relaxed.\n:keyword bool encode_with_idna: if this keyword is specified\n and it is :data:`True`, then the ``host`` parameter will be\n encoded using IDN. If this value is provided as :data:`False`,\n then the percent-encoding scheme is used instead. 
If this\n parameter is omitted or included with a different value, then\n the ``host`` parameter is processed using :data:`IDNA_SCHEMES`.\n:keyword str scheme: if this keyword is specified, then it is\n used to determine whether to apply IDN rules or not. This\n parameter is ignored if `encode_with_idna` is not :data:`None`.\n\n:return: the normalized and encoded string ready for inclusion\n into a URL", "id": "f5521:m5"} {"signature": "def _create_url_identifier(user, password):", "body": "if user is not None:user = parse.quote(user.encode(''), safe=USERINFO_SAFE_CHARS)if password:password = parse.quote(password.encode(''),safe=USERINFO_SAFE_CHARS)return ''.format(user, password)return userreturn None", "docstring": "Generate the user+password portion of a URL.\n\n:param str user: the user name or :data:`None`\n:param str password: the password or :data:`None`", "id": "f5521:m4"} {"signature": "def __str__(self):", "body": "if self.response and not isinstance(self.response, dict):return \"\"\"\".format(self.code, self.message, self.response.content)return \"\"\"\".format(self.code, self.message, self.response)", "docstring": "Exception to String", "id": "f5526:c0:m1"} {"signature": "def __init__(self, endpoint, processes=):", "body": "self.processes = processesif endpoint.endswith(''): self.url_endpoint_root = endpoint[:-]else:self.url_endpoint_root = endpointself.session = requests.Session()self.session.header = {'': ''}methods = ['', '', '', '', '', '']http_retry = Retry(total=, connect=, read=, backoff_factor=,method_whitelist=methods)https_retry = Retry(total=, connect=, read=, backoff_factor=,method_whitelist=methods)http_adapter = HTTPAdapter(max_retries=http_retry)https_adapter = HTTPAdapter(max_retries=https_retry)self.session.mount('', http_adapter)self.session.mount('', https_adapter)self.authenticated = Falseself._token = Noneself.proxies = Noneself.timeout = None", "docstring": "Initialize a client connection\n\n:param endpoint: root endpoint (API URL)\n:type endpoint: str", "id": "f5526:c1:m0"} {"signature": "def get_url(self, endpoint):", "body": "return urljoin(self.url_endpoint_root, endpoint)", "docstring": "Returns the formated full URL endpoint\n:param endpoint: str. the relative endpoint to access\n:return: str", "id": "f5526:c1:m1"} {"signature": "def set_token(self, token):", "body": "if token:auth = HTTPBasicAuth(token, '')self._token = tokenself.authenticated = True self.session.auth = authlogger.debug(\"\", token)else:self._token = Noneself.authenticated = Falseself.session.auth = Nonelogger.debug(\"\")", "docstring": "Set token in authentification for next requests\n:param token: str. token to set in auth. 
If None, reinit auth", "id": "f5526:c1:m4"} {"signature": "def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):", "body": "logger.debug(\"\")logger.debug(\"\", endpoint)logger.debug(\"\", method)logger.debug(\"\", headers)logger.debug(\"\", json)logger.debug(\"\", params)logger.debug(\"\", data)url = self.get_url(endpoint)try:response = self.session.request(method=method, url=url, headers=headers, json=json,params=params, data=data, proxies=self.proxies,timeout=self.timeout)logger.debug(\"\", response.headers)logger.debug(\"\", response.content)except RequestException as e:response = {\"\": \"\",\"\": {\"\": e, \"\": BACKEND_ERROR},\"\": {\"\": e, \"\": BACKEND_ERROR}}raise BackendException(code=BACKEND_ERROR,message=e,response=response)else:return response", "docstring": "Returns the response from the requested endpoint with the requested method\n:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)\n:param endpoint: str. the relative endpoint to access\n:param params: (optional) Dictionary or bytes to be sent in the query string\nfor the :class:`Request`.\n:param data: (optional) Dictionary, bytes, or file-like object to send in the body\nof the :class:`Request`.\n:param json: (optional) json to send in the body of the :class:`Request`.\n:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.\n:return: Requests.response", "id": "f5526:c1:m2"} {"signature": "def delete(self, endpoint, headers):", "body": "response = self.get_response(method='', endpoint=endpoint, headers=headers)logger.debug(\"\", response)if response.status_code != : resp = self.decode(response=response)resp = {\"\": \"\"}return resp", "docstring": "Method to delete an item or all items\n\nheaders['If-Match'] must contain the _etag identifier of the element to delete\n\n:param endpoint: endpoint (API URL)\n:type endpoint: str\n:param headers: headers (example: Content-Type)\n:type headers: dict\n:return: response (deletion information)\n:rtype: dict", "id": "f5526:c1:m14"} {"signature": "@staticmethoddef decode(response):", "body": "try:response.raise_for_status()except requests.HTTPError as e:raise BackendException(code=response.status_code,message=e,response=response)else:resp_json = response.json()error = resp_json.get('', None)if error:raise BackendException(code=error[''],message=error[''],response=response)return resp_json", "docstring": "Decodes and returns the response as JSON (dict) or raise BackendException\n:param response: requests.response object\n:return: dict", "id": "f5526:c1:m3"} {"signature": "def get_token(self):", "body": "return self._token", "docstring": "Get the stored backend token", "id": "f5526:c1:m5"} {"signature": "@classmethoddef setUpClass(cls):", "body": "print(\"\")cls.backend_address = \"\"os.environ[''] = ''exit_code = subprocess.call(shlex.split('' % os.environ['']))assert exit_code == cls.pid = subprocess.Popen(['', '', '', '', '','', '', '', '', '',''])time.sleep()headers = {'': ''}params = {'': '', '': '', '': ''}response = requests.post(cls.backend_address + '', json=params, headers=headers)resp = response.json()cls.token = resp['']cls.auth = requests.auth.HTTPBasicAuth(cls.token, '')response = requests.get(cls.backend_address + '',auth=cls.auth)resp = response.json()cls.realmAll_id = resp[''][]['']", "docstring": "Function used in the beginning of test to prepare the backend\n\n:param module:\n:return: None", "id": "f5530:c0:m0"} {"signature": "@classmethoddef setUpClass(cls):", 
"body": "print(\"\")cls.backend_address = \"\"os.environ[''] = ''exit_code = subprocess.call(shlex.split('' % os.environ['']))assert exit_code == cls.pid = subprocess.Popen(['', '', '', '', '','', '', '', '', '',''])time.sleep()headers = {'': ''}params = {'': '', '': '', '': ''}response = requests.post(cls.backend_address + '', json=params, headers=headers)resp = response.json()cls.token = resp['']cls.auth = requests.auth.HTTPBasicAuth(cls.token, '')response = requests.get(cls.backend_address + '',auth=cls.auth)resp = response.json()cls.realmAll_id = resp[''][]['']", "docstring": "Function used in the beginning of test to prepare the backend\n\n:param module:\n:return: None", "id": "f5533:c0:m0"} {"signature": "@classmethoddef setUpClass(cls):", "body": "print(\"\")cls.backend_address = \"\"", "docstring": "Function used in the beginning of test to prepare the backend\n\n:param module:\n:return: None", "id": "f5536:c1:m0"} {"signature": "@classmethoddef tearDownClass(cls):", "body": "print(\"\")cls.pid.kill()", "docstring": "Stop the backend at the end of the tests\n\n:param module:\n:return: None", "id": "f5537:c0:m1"} {"signature": "@classmethoddef setUpClass(cls):", "body": "print(\"\")cls.backend_address = \"\"os.environ[''] = ''exit_code = subprocess.call(shlex.split('' % os.environ['']))assert exit_code == cls.pid = subprocess.Popen(['', '', '', '', '','', '', '', '', '',''])time.sleep()headers = {'': ''}params = {'': '', '': '', '': ''}response = requests.post(cls.backend_address + '', json=params, headers=headers)resp = response.json()cls.token = resp['']cls.auth = requests.auth.HTTPBasicAuth(cls.token, '')response = requests.get(cls.backend_address + '',auth=cls.auth)resp = response.json()cls.realmAll_id = resp[''][]['']headers = {'': ''}params = {'': '', '': cls.realmAll_id}for num in range():params[''] = '' + str(num)response = requests.post(cls.backend_address + '', json=params,headers=headers, auth=cls.auth)print(response.__dict__)assert_equal(response.status_code, )", "docstring": "Function used in the beginning of test to prepare the backend\n\n:param module:\n:return: None", "id": "f5537:c0:m0"} {"signature": "def _norm_unicode(s):", "body": "return normalize('', py3compat.cast_unicode(s))", "docstring": "Normalize unicode strings", "id": "f5548:m1"} {"signature": "def postgres_contents_config():", "body": "config = Config()config.NotebookApp.contents_manager_class = PostgresContentsManagerconfig.PostgresContentsManager.user_id = ''config.PostgresContentsManager.db_url = TEST_DB_URLreturn config", "docstring": "Shared setup code for PostgresContentsAPITest and subclasses.", "id": "f5550:m2"} {"signature": "def memoize_single_arg(f):", "body": "memo = {}@wraps(f)def memoized_f(arg):try:return memo[arg]except KeyError:result = memo[arg] = f(arg)return resultreturn memoized_f", "docstring": "Decorator memoizing a single-argument function", "id": "f5553:m5"} {"signature": "def _managers_changed(self, name, old, new):", "body": "for key in new:if '' in key:raise ValueError(\"\" % key)self.managers = {k.strip(''): v for k, v in new.items()}", "docstring": "Strip slashes from directories before updating.", "id": "f5555:c0:m1"} {"signature": "def path_dispatch_kwarg(mname, path_default, returns_model):", "body": "def _wrapper(self, path=path_default, **kwargs):prefix, mgr, mgr_path = _resolve_path(path, self.managers)result = getattr(mgr, mname)(path=mgr_path, **kwargs)if returns_model and prefix:return _apply_prefix(prefix, result)else:return resultreturn _wrapper", "docstring": 
"Parameterized decorator for methods that accept path as a second\nargument.", "id": "f5555:m5"} {"signature": "def path_dispatch2(mname, first_argname, returns_model):", "body": "def _wrapper(self, *args, **kwargs):other, args = _get_arg(first_argname, args, kwargs)path, args = _get_arg('', args, kwargs)prefix, mgr, mgr_path = _resolve_path(path, self.managers)result = getattr(mgr, mname)(other, mgr_path, *args, **kwargs)if returns_model and prefix:return _apply_prefix(prefix, result)else:return resultreturn _wrapper", "docstring": "Decorator for methods that accept path as a second argument.", "id": "f5555:m4"} {"signature": "@outside_root_to_404def get(self, path, content=True, type=None, format=None):", "body": "path = normalize_api_path(path)if path:return self.__get(path, content=content, type=type, format=format)if not content:return base_directory_model('')extra_content = self._extra_root_dirs()rm = self.root_managerif rm is None:root_model = base_directory_model('')root_model.update(format='',content=extra_content,)else:root_model = rm.get(path,content=content,type=type,format=format,)root_model[''].extend(extra_content)return root_model", "docstring": "Special case handling for listing root dir.", "id": "f5555:c0:m4"} {"signature": "def _get_arg(argname, args, kwargs):", "body": "try:return kwargs.pop(argname), argsexcept KeyError:passtry:return args[], args[:]except IndexError:raise TypeError(\"\" % argname)", "docstring": "Get an argument, either from kwargs or from the first entry in args.\nRaises a TypeError if argname not in kwargs and len(args) == 0.\n\nMutates kwargs in place if the value is found in kwargs.", "id": "f5555:m1"} {"signature": "@contextmanagerdef temp_alembic_ini(alembic_dir_location, sqlalchemy_url):", "body": "with TemporaryDirectory() as tempdir:alembic_ini_filename = join(tempdir, '')with open(alembic_ini_filename, '') as f:f.write(ALEMBIC_INI_TEMPLATE.format(alembic_dir_location=alembic_dir_location,sqlalchemy_url=sqlalchemy_url,))yield alembic_ini_filename", "docstring": "Temporarily write an alembic.ini file for use with alembic migration\nscripts.", "id": "f5559:m0"} {"signature": "def _get_file(db, user_id, api_path, query_fields, decrypt_func):", "body": "result = db.execute(_select_file(user_id, api_path, query_fields, limit=),).first()if result is None:raise NoSuchFile(api_path)if files.c.content in query_fields:return to_dict_with_content(query_fields, result, decrypt_func)else:return to_dict_no_content(query_fields, result)", "docstring": "Get file data for the given user_id, path, and query_fields. 
The\nquery_fields parameter specifies which database fields should be\nincluded in the returned file data.", "id": "f5560:m19"} {"signature": "def ensure_directory(db, user_id, api_path):", "body": "with ignore_unique_violation():create_directory(db, user_id, api_path)", "docstring": "Ensure that the given user has the given directory.", "id": "f5560:m6"} {"signature": "def preprocess_incoming_content(content, encrypt_func, max_size_bytes):", "body": "encrypted = encrypt_func(content)if max_size_bytes != UNLIMITED and len(encrypted) > max_size_bytes:raise FileTooLarge()return encrypted", "docstring": "Apply preprocessing steps to file/notebook content that we're going to\nwrite to the database.\n\nApplies ``encrypt_func`` to ``content`` and checks that the result is\nsmaller than ``max_size_bytes``.", "id": "f5560:m0"} {"signature": "def delete_directory(db, user_id, api_path):", "body": "db_dirname = from_api_dirname(api_path)try:result = db.execute(directories.delete().where(and_(directories.c.user_id == user_id,directories.c.name == db_dirname,)))except IntegrityError as error:if is_foreign_key_violation(error):raise DirectoryNotEmpty(api_path)else:raiserowcount = result.rowcountif not rowcount:raise NoSuchDirectory(api_path)return rowcount", "docstring": "Delete a directory.", "id": "f5560:m9"} {"signature": "def reencrypt_user_content(engine,user_id,old_decrypt_func,new_encrypt_func,logger):", "body": "logger.info(\"\", user_id)with engine.begin() as db:logger.info(\"\", user_id)for (file_id,) in select_file_ids(db, user_id):reencrypt_row_content(db,files,file_id,old_decrypt_func,new_encrypt_func,logger,)logger.info(\"\", user_id)for (cp_id,) in select_remote_checkpoint_ids(db, user_id):reencrypt_row_content(db,remote_checkpoints,cp_id,old_decrypt_func,new_encrypt_func,logger,)logger.info(\"\", user_id)", "docstring": "Re-encrypt all of the files and checkpoints for a single user.", "id": "f5560:m42"} {"signature": "def files_in_directory(db, user_id, db_dirname):", "body": "fields = _file_default_fields()rows = db.execute(select(fields,).where(_is_in_directory(files, user_id, db_dirname),).order_by(files.c.user_id,files.c.parent_name,files.c.name,files.c.created_at,).distinct(files.c.user_id, files.c.parent_name, files.c.name,))return [to_dict_no_content(fields, row) for row in rows]", "docstring": "Return files in a directory.", "id": "f5560:m12"} {"signature": "def select_file_ids(db, user_id):", "body": "return list(db.execute(select([files.c.id]).where(files.c.user_id == user_id)))", "docstring": "Get all file ids for a user.", "id": "f5560:m40"} {"signature": "def _file_creation_order():", "body": "return desc(files.c.created_at)", "docstring": "Return an order_by on file creation date.", "id": "f5560:m16"} {"signature": "def save_file(db, user_id, path, content, encrypt_func, max_size_bytes):", "body": "content = preprocess_incoming_content(content,encrypt_func,max_size_bytes,)directory, name = split_api_filepath(path)with db.begin_nested() as savepoint:try:res = db.execute(files.insert().values(name=name,user_id=user_id,parent_name=directory,content=content,))except IntegrityError as error:if is_unique_violation(error):savepoint.rollback()res = db.execute(files.update().where(_file_where(user_id, path),).values(content=content,created_at=func.now(),))else:raisereturn res", "docstring": "Save a file.\n\nTODO: Update-then-insert is probably cheaper than insert-then-update.", "id": "f5560:m26"} {"signature": "def 
reencrypt_row_content(db,table,row_id,decrypt_func,encrypt_func,logger):", "body": "q = (select([table.c.content]).with_for_update().where(table.c.id == row_id))[(content,)] = db.execute(q)logger.info(\"\", table.name, row_id)db.execute(table.update().where(table.c.id == row_id).values(content=encrypt_func(decrypt_func(content))))logger.info(\"\", table.name, row_id)", "docstring": "Re-encrypt a row from ``table`` with ``id`` of ``row_id``.", "id": "f5560:m39"} {"signature": "def run_migrations_offline():", "body": "url = config.get_main_option(\"\")context.configure(url=url, target_metadata=target_metadata)with context.begin_transaction():context.run_migrations()", "docstring": "Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.", "id": "f5568:m0"} {"signature": "@contextmanagerdef ignore_unique_violation():", "body": "try:yieldexcept IntegrityError as error:if not is_unique_violation(error):raise", "docstring": "Context manager for gobbling unique violations.\n\nNOTE: If a unique violation is raised, the existing psql connection will\nnot accept new commands. This just silences the python-level error. If\nyou need emit another command after possibly ignoring a unique violation,\nyou should explicitly use savepoints.", "id": "f5570:m2"} {"signature": "def to_dict_no_content(fields, row):", "body": "assert(len(fields) == len(row))field_names = list(map(_get_name, fields))assert '' not in field_names, \"\"return dict(zip(field_names, row))", "docstring": "Convert a SQLAlchemy row that does not contain a 'content' field to a dict.\n\nIf row is None, return None.\n\nRaises AssertionError if there is a field named 'content' in ``fields``.", "id": "f5570:m4"} {"signature": "@outside_root_to_404def guess_type(self, path, allow_directory=True):", "body": "if path.endswith(''):return ''elif allow_directory and self.dir_exists(path):return ''else:return ''", "docstring": "Guess the type of a file.\n\nIf allow_directory is False, don't consider the possibility that the\nfile is a directory.", "id": "f5571:c0:m6"} {"signature": "def _save_directory(self, db, path):", "body": "ensure_directory(db, self.user_id, path)", "docstring": "'Save' a directory.", "id": "f5571:c0:m21"} {"signature": "def _file_model_from_db(self, record, content, format):", "body": "path = to_api_path(record[''] + record[''])model = base_model(path)model[''] = ''model[''] = model[''] = record['']if content:bcontent = record['']model[''], model[''], model[''] = from_b64(path,bcontent,format,)return model", "docstring": "Build a file model from database record.", "id": "f5571:c0:m17"} {"signature": "def _directory_model_from_db(self, record, content):", "body": "model = base_directory_model(to_api_path(record['']))if content:model[''] = ''model[''] = list(chain(self._convert_file_records(record['']),(self._directory_model_from_db(subdir, False)for subdir in record['']),))return model", "docstring": "Build a directory model from database directory record.", "id": "f5571:c0:m16"} {"signature": "def _notebook_model_from_db(self, record, content):", "body": "path = to_api_path(record[''] + record[''])model = base_model(path)model[''] = ''model[''] = model[''] = record['']if content:content = reads_base64(record[''])self.mark_trusted_cells(content, path)model[''] = 
contentmodel[''] = ''self.validate_notebook_model(model)return model", "docstring": "Build a notebook model from database record.", "id": "f5571:c0:m13"} {"signature": "def writes_base64(nb, version=NBFORMAT_VERSION):", "body": "return b64encode(writes(nb, version=version).encode(''))", "docstring": "Write a notebook as base64.", "id": "f5573:m8"} {"signature": "def split_api_filepath(path):", "body": "parts = path.rsplit('', )if len(parts) == :name = parts[]dirname = ''else:name = parts[]dirname = parts[] + ''return from_api_dirname(dirname), name", "docstring": "Split an API file path into directory and name.", "id": "f5573:m7"} {"signature": "def from_b64(path, bcontent, format):", "body": "decoders = {'': lambda path, bcontent: (bcontent.decode(''), ''),'': _decode_text_from_base64,None: _decode_unknown_from_base64,}try:content, real_format = decoders[format](path, bcontent)except HTTPError:raiseexcept Exception as e:raise CorruptedFile(e)default_mimes = {'': '','': '',}mimetype = mimetypes.guess_type(path)[] or default_mimes[real_format]return content, real_format, mimetype", "docstring": "Decode base64 content for a file.\n\nformat:\n If 'text', the contents will be decoded as UTF-8.\n If 'base64', do nothing.\n If not specified, try to decode as UTF-8, and fall back to base64\n\nReturns a triple of decoded_content, format, and mimetype.", "id": "f5573:m12"} {"signature": "def _decode_unknown_from_base64(path, bcontent):", "body": "content = b64decode(bcontent)try:return (content.decode(''), '')except UnicodeError:passreturn bcontent.decode(''), ''", "docstring": "Decode base64 data of unknown format.\n\nAttempts to interpret data as utf-8, falling back to ascii on failure.", "id": "f5573:m11"} {"signature": "def get_pull_requests(app, repo_config):", "body": "response = get_api_response(app, repo_config, \"\")if not response.ok:raise Exception(\"\".format(response.status_code))return (item for item in response.json)", "docstring": "Last 30 pull requests from a repository.\n\n :param app: Flask app\n :param repo_config: dict with ``github_repo`` key\n\n :returns: id for a pull request", "id": "f5575:m12"} {"signature": "def get_key_for_purpose_and_type(self, purpose, key_type):", "body": "key = [key for key in self.keys.values() if key.purpose == purpose and key.key_type == key_type]try:return key[]except IndexError:return None", "docstring": "Gets a list of keys that match the purpose and key_type, and returns the first key in that list\nNote, if there are many keys that match the criteria, the one you get back will be random from that list\n:returns: A key object that matches the criteria", "id": "f5583:c1:m4"} {"signature": "@staticmethoddef decrypt_with_key(encrypted_token, key):", "body": "try:jwe_token = jwe.JWE(algs=['', ''])jwe_token.deserialize(encrypted_token)jwe_token.decrypt(key)return jwe_token.payload.decode()except (ValueError, InvalidJWEData) as e:raise InvalidTokenException(str(e)) from e", "docstring": "Decrypts JWE token with supplied key\n:param encrypted_token:\n:param key: A (:class:`jwcrypto.jwk.JWK`) decryption key or a password\n:returns: The payload of the decrypted token", "id": "f5589:c0:m1"} {"signature": "def get_private_key(platform, service, purpose, key_use, version, private_key, keys_folder):", "body": "private_key_data = get_file_contents(keys_folder, private_key)private_key = load_pem_private_key(private_key_data.encode(), None, backend=backend)pub_key = private_key.public_key()pub_bytes = pub_key.public_bytes(Encoding.PEM, 
PublicFormat.SubjectPublicKeyInfo)kid = _generate_kid_from_key(pub_bytes.decode())key = _create_key(platform=platform, service=service, key_use=key_use,key_type=\"\", purpose=purpose, version=version,public_key=pub_bytes.decode(), private_key=private_key_data)return kid, key", "docstring": "Loads a private key from the file system and adds it to a dict of keys\n:param keys: A dict of keys\n:param platform the platform the key is for\n:param service the service the key is for\n:param key_use what the key is used for\n:param version the version of the key\n:param purpose: The purpose of the private key\n:param private_key: The name of the private key to add\n:param keys_folder: The location on disk where the key exists\n:param kid_override: This allows the caller to override the generated KID value\n:return: None", "id": "f5591:m5"} {"signature": "def get_public_key(platform, service, purpose, key_use, version, public_key, keys_folder):", "body": "public_key_data = get_file_contents(keys_folder, public_key)pub_key = load_pem_public_key(public_key_data.encode(), backend=backend)pub_bytes = pub_key.public_bytes(Encoding.PEM, PublicFormat.SubjectPublicKeyInfo)kid = _generate_kid_from_key(pub_bytes.decode())key = _create_key(platform=platform, service=service, key_use=key_use,key_type=\"\", purpose=purpose, version=version,public_key=public_key_data)return kid, key", "docstring": "Loads a public key from the file system and adds it to a dict of keys\n:param keys: A dict of keys\n:param platform the platform the key is for\n:param service the service the key is for\n:param key_use what the key is used for\n:param version the version of the key\n:param purpose: The purpose of the public key\n:param public_key: The name of the public key to add\n:param keys_folder: The location on disk where the key exists\n:param kid_override: This allows the caller to override the generated KID value\n:return: None", "id": "f5591:m4"} {"signature": "@pointfreedef pfmap(func, iterable):", "body": "for item in iterable:yield func(item)", "docstring": "A pointfree map function: Returns an iterator over the results of\n applying a function of one argument to the items of a given iterable.\n The function is provided \"lazily\" to the given iterable; each function\n application is performed on the fly as it is requested.\n\n :param func: A function of one argument to apply to each item\n :param iterable: An iterator yielding input for the function\n :rtype: Iterator of function application results\n\n Example::\n\n >>> f = pfmap(lambda x: x+1) \\\\\n ... >> pfmap(lambda x: x*2) \\\\\n ... 
>> pfcollect\n\n >>> f(range(5))\n [2, 4, 6, 8, 10]", "id": "f5604:m0"} {"signature": "@classmethoddef make_copy(klass, inst, func=None, argv=None, extra_argv=None, copy_sig=True):", "body": "dest = klass(func or inst.func)dest.argv = (argv or inst.argv).copy()dest.extra_argv = list(extra_argv if extra_argv else inst.extra_argv)if copy_sig:dest.__sig_from_partial(inst)return dest", "docstring": "Makes a new instance of the partial application wrapper based on\n an existing instance, optionally overriding the original's wrapped\n function and/or saved arguments.\n\n :param inst: The partial instance we're copying\n :param func: Override the original's wrapped function\n :param argv: Override saved argument values\n :param extra_argv: Override saved extra positional arguments\n :param copy_sig: Copy original's signature?\n :rtype: New partial wrapper instance", "id": "f5604:c0:m3"} {"signature": "@pointfreedef pffilter(pred, iterable):", "body": "for item in iterable:if pred(item): yield item", "docstring": "Pointfree filter function.\n\n Example::\n\n >>> f = pffilter(lambda n: n % 2 == 0) \\\\\n ... >> pfcollect\n\n >>> f(range(5))\n [0, 2, 4]", "id": "f5604:m2"} {"signature": "def __new_argv(self, *new_pargs, **new_kargs):", "body": "new_argv = self.argv.copy()new_extra_argv = list(self.extra_argv)for v in new_pargs:arg_name = Nonefor name in self.pargl:if not name in new_argv:arg_name = namebreakif arg_name:new_argv[arg_name] = velif self.var_pargs:new_extra_argv.append(v)else:num_prev_pargs = len([name for name in self.pargl if name in self.argv])raise TypeError(\"\"% (self.__name__,len(self.pargl),num_prev_pargs + len(new_pargs)))for k,v in new_kargs.items():if not (self.var_kargs or (k in self.pargl) or (k in self.kargl)):raise TypeError(\"\"% (self.__name__, k))new_argv[k] = vreturn (new_argv, new_extra_argv)", "docstring": "Calculate new argv and extra_argv values resulting from adding\n the specified positional and keyword arguments.", "id": "f5604:c0:m5"} {"signature": "@pointfreedef pfprint_all(iterable, end='', file=None):", "body": "for item in iterable:pfprint(item, end=end, file=file)", "docstring": "Prints each item from an iterable.\n\n :param iterable: An iterable yielding values to print\n :param end: String to append to the end of printed output\n :param file: File to which output is printed\n :rtype: None\n\n Example::\n\n >>> @pointfree\n ... def prefix_all(prefix, iterable):\n ... for item in iterable:\n ... yield \"%s%s\" % (prefix, item)\n\n >>> fn = prefix_all(\"An item: \") >> pfprint_all\n\n >>> fn([\"foo\", \"bar\", \"baz\"])\n An item: foo\n An item: bar\n An item: baz", "id": "f5604:m5"} {"signature": "@pointfreedef pfignore_all(iterator):", "body": "for item in iterator:pass", "docstring": "Consumes all the items from an iterable, discarding their output.\n This may be useful if evaluating the iterable produces some desirable\n side-effect, but you have no need to collect its output.\n\n :param iterable: An iterable\n :rtype: None\n\n Example::\n\n >>> result = []\n\n >>> @pointfree\n ... def append_all(collector, iterable):\n ... for item in iterable:\n ... collector.append(item)\n ... yield item\n\n >>> @pointfree\n ... def square_all(iterable):\n ... for item in iterable:\n ... yield item**2\n\n >>> fn = square_all \\\\\n ... >> append_all(result) \\\\\n ... 
>> pfignore_all\n >>> fn([1, 2, 3, 4])\n >>> result\n [1, 4, 9, 16]", "id": "f5604:m6"} {"signature": "@pointfreedef pfcollect(iterable, n=None):", "body": "if n:return list(itertools.islice(iterable, n))else:return list(iterable)", "docstring": "Collects and returns a list of values from the given iterable. If\n the n parameter is not specified, collects all values from the\n iterable.\n\n :param iterable: An iterable yielding values for the list\n :param n: An optional maximum number of items to collect\n :rtype: List of values from the iterable\n\n Example::\n\n >>> @pointfree\n ... def fibonaccis():\n ... a, b = 0, 1\n ... while True:\n ... a, b = b, a+b\n ... yield a\n\n >>> (pfcollect(n=10) * fibonaccis)()\n [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]", "id": "f5604:m3"} {"signature": "@propertydef mid_price(self):", "body": "return (self.ask_price + self.bid_price) / Decimal('')", "docstring": "Compute the mid point between the bid and ask prices", "id": "f5625:c2:m2"} {"signature": "def generate_dataframe(self, symbols=None, date_index=None, price_type=\"\"):", "body": "if symbols is None:symbols = list(Currency.objects.all().values_list('', flat=True))try:start_date = date_index[]end_date = date_index[-]except:start_date = DATEFRAME_START_DATEend_date = date.today()date_index = date_range(start_date, end_date)currency_price_data = CurrencyPrice.objects.filter(currency__symbol__in=symbols,date__gte=date_index[],date__lte=date_index[-]).values_list('', '', '', '')try:forex_data_array = np.core.records.fromrecords(currency_price_data, names=['', '', '', ''])except IndexError:forex_data_array = np.core.records.fromrecords([(date(, , ), \"\", , )], names=['', '', '', ''])df = DataFrame.from_records(forex_data_array, index='')df[''] = df.indexif price_type == \"\":df[''] = (df[''] + df['']) / elif price_type == \"\":df[''] = df['']elif price_type == \"\":df[''] = df['']else:raise ValueError(\"\" % str(price_type))df = df.pivot(index='', columns='', values='')df = df.reindex(date_index)df = df.fillna(method=\"\")unlisted_symbols = list(set(symbols) - set(df.columns))for unlisted_symbol in unlisted_symbols:df[unlisted_symbol] = np.nandf = df[symbols]return df", "docstring": "Generate a dataframe consisting of the currency prices (specified by symbols)\nfrom the start to end date", "id": "f5625:c1:m0"} {"signature": "@propertydef spread(self):", "body": "return (self.ask_price - self.bid_price)", "docstring": "Compute the difference between bid and ask prices", "id": "f5625:c2:m3"} {"signature": "@app.route('', methods=['', ''])def preferences():", "body": "if request.method == '':resp = make_response(redirect(urljoin(settings[''][''], url_for(''))))try:request.preferences.parse_form(request.form)except ValidationException:request.errors.append(gettext(''))return respreturn request.preferences.save(resp)image_proxy = request.preferences.get_value('')lang = request.preferences.get_value('')disabled_engines = request.preferences.engines.get_disabled()allowed_plugins = request.preferences.plugins.get_enabled()stats = {}for c in categories:for e in categories[c]:stats[e.name] = {'': None,'': False,'': False}if e.timeout > settings['']['']:stats[e.name][''] = Truestats[e.name][''] = _is_selected_language_supported(e, request.preferences)for engine_stat in get_engines_stats()[][]:stats[engine_stat.get('')][''] = round(engine_stat.get(''), )if engine_stat.get('') > settings['']['']:stats[engine_stat.get('')][''] = Truereturn 
render('',locales=settings[''],current_locale=get_locale(),image_proxy=image_proxy,engines_by_category=categories,stats=stats,answerers=[{'': a.self_info(), '': a.keywords} for a in answerers],disabled_engines=disabled_engines,autocomplete_backends=autocomplete_backends,shortcuts={y: x for x, y in engine_shortcuts.items()},themes=themes,plugins=plugins,doi_resolvers=settings[''],current_doi_resolver=get_doi_resolver(request.args, request.preferences.get_value('')),allowed_plugins=allowed_plugins,theme=get_current_theme_name(),preferences_url_params=request.preferences.get_as_url_params(),base_url=get_base_url(),preferences=True)", "docstring": "Render preferences page && save user preferences", "id": "f5718:m15"} {"signature": "@app.route('', methods=[''])def stats():", "body": "stats = get_engines_stats()return render('',stats=stats,)", "docstring": "Render engine statistics page.", "id": "f5718:m18"} {"signature": "def get_current_theme_name(override=None):", "body": "if override and (override in themes or override == ''):return overridetheme_name = request.args.get('', request.preferences.get_value(''))if theme_name not in themes:theme_name = default_themereturn theme_name", "docstring": "Returns theme name.\n\n Checks in this order:\n 1. override\n 2. cookies\n 3. settings", "id": "f5718:m4"} {"signature": "def request(method, url, **kwargs):", "body": "time_before_request = time()session = SessionSinglePool()kwargs[''] = settings[''].get('') or Noneif '' in kwargs:timeout = kwargs['']else:timeout = getattr(threadLocal, '', None)if timeout is not None:kwargs[''] = timeoutresponse = session.request(method=method, url=url, **kwargs)time_after_request = time()if timeout is not None:timeout_overhead = start_time = getattr(threadLocal, '', time_before_request)search_duration = time_after_request - start_timeif search_duration > timeout + timeout_overhead:raise requests.exceptions.Timeout(response=response)session.close()if hasattr(threadLocal, ''):threadLocal.total_time += time_after_request - time_before_requestreturn response", "docstring": "same as requests/requests/api.py request(...)", "id": "f5724:m3"} {"signature": "def searx_bang(full_query):", "body": "if len(full_query.getSearchQuery()) == :return []results = []first_char = full_query.getSearchQuery()[]if first_char == '' or first_char == '':if len(full_query.getSearchQuery()) == :results.append(first_char + \"\")results.append(first_char + \"\")results.append(first_char + \"\")else:engine_query = full_query.getSearchQuery()[:]for categorie in categories:if categorie.startswith(engine_query):results.append(first_char + ''.format(categorie=categorie))for engine in engines:if engine.startswith(engine_query.replace('', '')):results.append(first_char + ''.format(engine=engine.replace('', '')))for engine_shortcut in engine_shortcuts:if engine_shortcut.startswith(engine_query):results.append(first_char + ''.format(engine_shortcut=engine_shortcut))elif first_char == '':if len(full_query.getSearchQuery()) == :results.append(\"\")results.append(\"\")results.append(\"\")results.append(\"\")else:engine_query = full_query.getSearchQuery()[:]for lc in language_codes:lang_id, lang_name, country, english_name = map(unicode.lower, lc)if lang_id.startswith(engine_query):if len(engine_query) <= :results.append(u''.format(lang_id=lang_id.split('')[]))else:results.append(u''.format(lang_id=lang_id))if lang_name.startswith(engine_query) or english_name.startswith(engine_query):results.append(u''.format(lang_name=lang_name))if 
country.startswith(engine_query.replace('', '')):results.append(u''.format(country=country.replace('', '')))result_set = set(results)for query_part in full_query.query_parts:if query_part in result_set:result_set.remove(query_part)return list(result_set)", "docstring": "check if the searchQuery contain a bang, and create fitting autocompleter results", "id": "f5823:m1"} {"signature": "def request(query, params):", "body": "params[''] = '' % queryreturn params", "docstring": "pre-request callback\n params:\n method : POST/GET\n headers : {}\n data : {} # if method == POST\n url : ''\n category: 'search category'\n pageno : 1 # number of the requested page", "id": "f5825:m0"} {"signature": "def response(resp):", "body": "return [{'': '', '': '', '': ''}]", "docstring": "post-response callback\n resp: requests response object", "id": "f5825:m1"} {"signature": "def post_task():", "body": "", "docstring": "Test method.", "id": "f5827:m1"} {"signature": "@dumps.command()@click.argument('', type=click.File(''), nargs=-)@with_appcontextdef loaduserexts(sources):", "body": "from .tasks.oauthclient import load_userextloadcommon(sources, load_userext)", "docstring": "Load user identities (legacy UserEXT).", "id": "f5841:m11"} {"signature": "@dumps.command()@click.argument('', type=click.File(''), nargs=-)@with_appcontextdef loadremotetokens(sources):", "body": "from .tasks.oauthclient import load_remotetokenloadcommon(sources, load_remotetoken)", "docstring": "Load remote tokens.", "id": "f5841:m10"} {"signature": "@dumps.command()@click.argument('', type=click.File(''), nargs=-)@click.option('', '', type=int,help='',default=None)@with_appcontextdef loaddeposit(sources, depid):", "body": "from .tasks.deposit import load_depositif depid is not None:def pred(dep):return int(dep[\"\"][\"\"]) == depidloadcommon(sources, load_deposit, predicate=pred, asynchronous=False)else:loadcommon(sources, load_deposit)", "docstring": "Load deposit.\n\n Usage:\n invenio dumps loaddeposit ~/data/deposit_dump_*.json\n invenio dumps loaddeposit -d 12345 ~/data/deposit_dump_*.json", "id": "f5841:m8"} {"signature": "def prepare_files(self):", "body": "files = {}for f in self.data['']:k = f['']if k not in files:files[k] = []files[k].append(f)for k in files.keys():files[k].sort(key=lambda x: x[''])self.files = files", "docstring": "Get files from data dump.", "id": "f5844:c1:m6"} {"signature": "@cached_propertydef recid(self):", "body": "return self.data['']", "docstring": "Get recid.", "id": "f5844:c1:m3"} {"signature": "@propertydef rest(self):", "body": "return self.revisions[:]", "docstring": "The list of old revisions.", "id": "f5844:c1:m10"} {"signature": "def prepare_revisions(self):", "body": "self.revisions = []it = [self.data[''][]] if self.latest_onlyelse self.data['']for i in it:self.revisions.append(self._prepare_revision(i))", "docstring": "Prepare data.", "id": "f5844:c1:m5"} {"signature": "def is_deleted(self, record=None):", "body": "record = record or self.revisions[-][]return any(col == ''for col in record.get('', []))", "docstring": "Check if record is deleted.", "id": "f5844:c1:m11"} {"signature": "@cached_propertydef missing_pids(self):", "body": "missing = []for p in self.pids:try:PersistentIdentifier.get(p.pid_type, p.pid_value)except PIDDoesNotExistError:missing.append(p)return missing", "docstring": "Filter persistent identifiers.", "id": "f5844:c1:m2"} {"signature": "@shared_task()def load_remotetoken(data):", "body": "from invenio_oauthclient.models import RemoteTokenload_common(RemoteToken, 
data)", "docstring": "Load the remote tokens from data dump.\n\n :param data: Dictionary containing remote tokens data.\n :type data: dict", "id": "f5846:m1"} {"signature": "@shared_task()def load_remoteaccount(data):", "body": "from invenio_oauthclient.models import RemoteAccountload_common(RemoteAccount, data)", "docstring": "Load the remote accounts from data dump.\n\n :param data: Dictionary containing remote accounts data.\n :type data: dict", "id": "f5846:m0"} {"signature": "@shared_task()def load_client(data):", "body": "from invenio_oauth2server.models import Clientload_common(Client, data)", "docstring": "Load the oauth2server client from data dump.", "id": "f5847:m0"} {"signature": "def __init__(self, pid, recid, *args, **kwargs):", "body": "self.recid = recidsuper(DepositRecidDoesNotExist, self).__init__(pid, *args, **kwargs)", "docstring": "Initialize exception.", "id": "f5848:c1:m0"} {"signature": "@shared_task()def load_deposit(data):", "body": "from invenio_db import dbdeposit, dep_pid = create_record_and_pid(data)deposit = create_files_and_sip(deposit, dep_pid)db.session.commit()", "docstring": "Load the raw JSON dump of the Deposition.\n\n Uses Record API in order to bypass all Deposit-specific initialization,\n which are to be done after the final stage of deposit migration.\n\n :param data: Dictionary containing deposition data.\n :type data: dict", "id": "f5849:m0"} {"signature": "@shared_task()def load_featured(data):", "body": "from invenio_communities.models import FeaturedCommunityobj = FeaturedCommunity(id=data[''],id_community=data[''],start_date=iso2dt(data['']))db.session.add(obj)db.session.commit()", "docstring": "Load community featuring from data dump.\n\n :param data: Dictionary containing community featuring data.\n :type data: dict", "id": "f5850:m1"} {"signature": "@shared_task()def load_user(data):", "body": "from invenio_accounts.models import Userfrom invenio_userprofiles.api import UserProfileemail = data[''].strip()if User.query.filter_by(email=email).count() > :raise UserEmailExistsError(\"\".format(email=email))last_login = Noneif data['']:last_login = arrow.get(data['']).datetimeconfirmed_at = Noneif data[''] == '':confirmed_at = datetime.utcnow()salt = data['']checksum = data['']if not checksum:new_password = Noneelif checksum.startswith(''):new_password = checksumelse:new_password = str.join('', ['', u'', salt, checksum])with db.session.begin_nested():obj = User(id=data[''],password=new_password,email=email,confirmed_at=confirmed_at,last_login_at=last_login,active=(data[''] != ''),)db.session.add(obj)nickname = data[''].strip()overwritten_username = ('' in data and '' in data)if nickname or overwritten_username:p = UserProfile(user=obj)p.full_name = data.get('', '').strip()if overwritten_username:p._username = data[''].lower()p._displayname = data['']elif nickname:if UserProfile.query.filter(UserProfile._username == nickname.lower()).count() > :raise UserUsernameExistsError(\"\".format(username=nickname))try:p.username = nicknameexcept ValueError:current_app.logger.warn(u''.format(nickname, data['']))p._username = nickname.lower()p._displayname = nicknamedb.session.add(p)db.session.commit()", "docstring": "Load user from data dump.\n\n NOTE: This task takes into account the possible duplication of emails and\n usernames, hence it should be called synchronously.\n In such case of collision it will raise UserEmailExistsError or\n UserUsernameExistsError, if email or username are already existing in\n the database. 
Caller of this task should take care to to resolve those\n collisions beforehand or after catching an exception.\n\n :param data: Dictionary containing user data.\n :type data: dict", "id": "f5854:m0"} {"signature": "def check(id_):", "body": "BibRecDocs, BibDoc = _import_bibdoc()try:BibDoc(id_).list_all_files()except Exception:click.secho(\"\".format(id_), fg='')", "docstring": "Check bibdocs.", "id": "f5857:m6"} {"signature": "def _get_recids_invenio2(from_date):", "body": "from invenio.legacy.dbquery import run_sqlreturn (id[] for id in run_sql('''''',(from_date, ), run_on_slave=True))", "docstring": "Get BibDocs for Invenio 2.", "id": "f5857:m1"} {"signature": "def _import_bibdoc():", "body": "try:from invenio.bibdocfile import BibRecDocs, BibDocexcept ImportError:from invenio.legacy.bibdocfile.api import BibRecDocs, BibDocreturn BibRecDocs, BibDoc", "docstring": "Import BibDocFile.", "id": "f5857:m3"} {"signature": "@click.group()def cli():", "body": "pass", "docstring": "Command for dumping all records as JSON.", "id": "f5859:m0"} {"signature": "def _get_run_sql():", "body": "try:from invenio.dbquery import run_sqlexcept ImportError:from invenio.legacy.dbquery import run_sqlreturn run_sql", "docstring": "Import ``run_sql``.", "id": "f5860:m0"} {"signature": "def get_connected_roles(action_id):", "body": "try:from invenio.access_control_admin import compile_role_definitionexcept ImportError:from invenio.modules.access.firerole import compile_role_definitionrun_sql = _get_run_sql()roles = {}res = run_sql('''''''''''''', (action_id, ))for r in res:role = roles.setdefault(r[], {'': r[],'': r[],'': r[],'': r[],'': compile_role_definition(r[]),'': set(),'': {}})param = role[''].setdefault(r[], set())param.add(r[])role[''].add(r[])return six.itervalues(roles)", "docstring": "Get roles connected to an action.", "id": "f5860:m1"} {"signature": "def get(*args, **kwargs):", "body": "from invenio.modules.oauthclient.models import RemoteAccountq = RemoteAccount.queryreturn q.count(), q.all()", "docstring": "Get users.", "id": "f5861:m0"} {"signature": "def get(query, from_date, limit=, **kwargs):", "body": "dep_generator = _get_depositions()total_depids = if limit > :dep_generator = islice(dep_generator, limit)total_depids = limitreturn total_depids, dep_generator", "docstring": "Get deposits.", "id": "f5862:m3"} {"signature": "def _get_depositions(user=None, type=None):", "body": "from invenio.modules.workflows.models import BibWorkflowObject, Workflowfrom invenio.modules.deposit.models import InvalidDepositionTypefrom flask import current_appfrom invenio.ext.sqlalchemy import dbfrom invenio.modules.deposit.models import Depositionparams = [Workflow.module_name == '',]if user:params.append(BibWorkflowObject.id_user == user.get_id())else:params.append(BibWorkflowObject.id_user != )if type:params.append(Workflow.name == type.get_identifier())objects = BibWorkflowObject.query.join(\"\").options(db.contains_eager('')).filter(*params)def _create_obj(o):try:obj = Deposition(o)except InvalidDepositionType as err:current_app.logger.exception(err)return Noneif type is None or obj.type == type:return objreturn Nonedef mapper_filter(objs):for o in objs:o = _create_obj(o)if o is not None:yield oreturn mapper_filter(objects)", "docstring": "Get list of depositions (as iterator).\n\n This is redefined Deposition.get_depositions classmethod without order-by\n for better performance.", "id": "f5862:m2"} {"signature": "def default_serializer(o):", "body": "defs = (((datetime.date, datetime.time),lambda x: 
x.isoformat(), ),((datetime.datetime, ),lambda x: dt2utc_timestamp(x), ),)for types, fun in defs:if isinstance(o, types):return fun(o)", "docstring": "Default serializer for json.", "id": "f5862:m1"} {"signature": "def get(*args, **kwargs):", "body": "from invenio.modules.communities.models import Communityq = Community.queryreturn q.count(), q.all()", "docstring": "Get communities.", "id": "f5863:m0"} {"signature": "def get(*args, **kwargs):", "body": "from invenio.modules.oauthclient.models import RemoteTokenq = RemoteToken.queryreturn q.count(), q.all()", "docstring": "Get users.", "id": "f5864:m0"} {"signature": "def get_record_collections(recid):", "body": "try:from invenio.search_engine import (get_all_collections_of_a_record,get_restricted_collections_for_recid)except ImportError:from invenio.legacy.search_engine import (get_all_collections_of_a_record,get_restricted_collections_for_recid)collections = {'':get_all_collections_of_a_record(recid, recreate_cache_if_needed=False),}collections[''] = dict((coll, _get_collection_restrictions(coll))for coll in get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False))return collections", "docstring": "Get all collections the record belong to.", "id": "f5865:m5"} {"signature": "def dump_record_json(marcxml):", "body": "try:from invenio.modules.records.api import Recordd = Record.create(marcxml, '')return d.dumps(clean=True)except ImportError:from invenio.bibfield import create_recordd = create_record(marcxml, master_format='')return d.dumps()", "docstring": "Dump JSON of record.", "id": "f5865:m6"} {"signature": "def _get_modified_recids_invenio2(from_date):", "body": "from invenio.legacy.search_engine import search_patternfrom invenio.modules.records.models import Recorddate = datetime.datetime.strptime(from_date, '')return set((x[]for x in Record.query.filter(Record.modification_date >= date).values(Record.id))), search_pattern", "docstring": "Get record ids for Invenio 2.", "id": "f5865:m1"} {"signature": "def get_record_revisions(recid, from_date):", "body": "try:from invenio.dbquery import run_sqlexcept ImportError:from invenio.legacy.dbquery import run_sqlreturn run_sql('''''', (recid, from_date),run_on_slave=True)", "docstring": "Get record revisions.", "id": "f5865:m4"} {"signature": "def _get_modified_recids_invenio12(from_date):", "body": "from invenio.search_engine import search_patternfrom invenio.dbquery import run_sqlreturn set((id[] for id in run_sql('',(from_date, ), run_on_slave=True))), search_pattern", "docstring": "Get record ids for Invenio 1.", "id": "f5865:m0"} {"signature": "def get(*args, **kwargs):", "body": "try:from invenio.modules.accounts.models import UserEXTexcept ImportError:from invenio_accounts.models import UserEXTq = UserEXT.queryreturn q.count(), q.all()", "docstring": "Get UserEXT objects.", "id": "f5867:m0"} {"signature": "def collect_things_entry_points():", "body": "things = dict()for entry_point in iter_entry_points(group=''):things[entry_point.name] = entry_point.load()return things", "docstring": "Collect entry points.", "id": "f5869:m1"} {"signature": "def memoize(func):", "body": "cache = {}@wraps(func)def wrap(*args, **kwargs):key = ''.format(args, kwargs)if key not in cache:cache[key] = func(*args, **kwargs)return cache[key]return wrap", "docstring": "Cache for heavy function calls.", "id": "f5869:m6"} {"signature": "def datetime_toutc(dt):", "body": "return dt.replace(tzinfo=tzlocal()).astimezone(tzutc())", "docstring": "Convert local datetime to UTC.", "id": "f5869:m3"} 
{"signature": "def dump(u, *args, **kwargs):", "body": "return dict(id=u.id,email=u.email,password=u.password,password_salt=u.password_salt,note=u.note,full_name=u.full_name if hasattr(u, '') else ''.format(u.given_names, u.family_name),settings=u.settings,nickname=u.nickname,last_login=dt2iso_or_empty(u.last_login))", "docstring": "Dump the users as a list of dictionaries.\n\n :param u: User to be dumped.\n :type u: `invenio.modules.accounts.models.User [Invenio2.x]` or namedtuple.\n :returns: User serialized to dictionary.\n :rtype: dict", "id": "f5870:m3"} {"signature": "def step(self, step_name):", "body": "@contextmanagerdef step_context(step_name):if self.event_receiver.current_case is not None:raise Exception('')self.event_receiver.begin_case(step_name, self.now_seconds(), self.name)try:yield self.event_receiverexcept:etype, evalue, tb = sys.exc_info()self.event_receiver.error('' % [etype, evalue, tb])raisefinally:self.event_receiver.end_case(step_name, self.now_seconds())return step_context(step_name)", "docstring": "Start a new step. returns a context manager which allows you to\n report an error", "id": "f5881:c0:m3"} {"signature": "def networkdays(from_date, to_date, locale=''):", "body": "holidays = locales[locale]return workdays.networkdays(from_date, to_date, holidays)", "docstring": "Return the net work days according to RH's calendar.", "id": "f5897:m0"} {"signature": "def listen(self):", "body": "self._sock.listen()while self.starting:try:sock, ip = self._sock.accept()threading.Thread(target=self.handle, args=(sock,)).start()except socket.error:pass", "docstring": "Listen on host:port", "id": "f5912:c0:m5"} {"signature": "def init_logger(self):", "body": "return PJFLogger.init_logger()", "docstring": "Init the default logger", "id": "f5912:c0:m7"} {"signature": "def custom_html(self, filepath):", "body": "try:response.headers.append(\"\", \"\")response.headers.append(\"\", \"\")response.headers.append(\"\", \"\")return static_file(filepath, root=self.config.html)except Exception as e:raise PJFBaseException(e.message if hasattr(e, \"\") else str(e))", "docstring": "Serve custom HTML page", "id": "f5915:c2:m5"} {"signature": "def shutdown(self, *args):", "body": "try:self._shutdown()if self.process:self.process.wait()self.process.stdout.close()self.process.stdin.close()self.process.stderr.close()self.finished = Trueself.send_testcase('', '', self.config.ports[\"\"][\"\"])self.logger.debug(\"\".format(time.strftime(\"\")))except Exception as e:raise PJFBaseException(e.message if hasattr(e, \"\") else str(e))", "docstring": "Shutdown the running process and the monitor", "id": "f5917:c0:m1"} {"signature": "def dup(self):", "body": "return _socketobject(_sock=self._sock)", "docstring": "dup() -> socket object\n\n Return a new socket object connected to the same system resource.", "id": "f5919:c1:m3"} {"signature": "def getfqdn(name=''):", "body": "name = name.strip()if not name or name == '':name = gethostname()try:hostname, aliases, ipaddrs = gethostbyaddr(name)except error:passelse:aliases.insert(, hostname)for name in aliases:if '' in name:breakelse:name = hostnamereturn name", "docstring": "Get fully qualified domain name from name.\n\n An empty argument is interpreted as meaning the local host.\n\n First the hostname returned by gethostbyaddr() is checked, then\n possibly existing aliases. 
In case no FQDN is available, hostname\n from gethostname() is returned.", "id": "f5919:m0"} {"signature": "def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,source_address=None):", "body": "host, port = addresserr = Nonefor res in getaddrinfo(host, port, , SOCK_STREAM):af, socktype, proto, canonname, sa = ressock = Nonetry:sock = socket(af, socktype, proto)if timeout is not _GLOBAL_DEFAULT_TIMEOUT:sock.settimeout(timeout)if source_address:sock.bind(source_address)sock.connect(sa)return sockexcept error as _:err = _if sock is not None:sock.close()if err is not None:raise errelse:raise error(\"\")", "docstring": "Connect to *address* and return the socket object.\n\n Convenience function. Connect to *address* (a 2-tuple ``(host,\n port)``) and return the socket object. Passing the optional\n *timeout* parameter will set the timeout on the socket instance\n before attempting to connect. If no *timeout* is supplied, the\n global default timeout setting returned by :func:`getdefaulttimeout`\n is used. If *source_address* is set it must be a tuple of (host, port)\n for the socket to bind as a source address before making the connection.\n An host of '' or port 0 tells the OS to use the default.", "id": "f5919:m2"} {"signature": "def _get_random(self, obj_type):", "body": "return self.mutator[obj_type][random.randint(, self.config.level)]", "docstring": "Get a random mutator from a list of mutators", "id": "f5925:c0:m1"} {"signature": "def random_action(self, b):", "body": "action = random.choice([, , , , , , , , , ])if len(b) >= :pos = random.randint(, len(b)-)if action == :rbyte = random.randrange()rn = random.randrange(len(b))b[rn] = \"\" % rbyteelif action == :howmany = random.randint(, )curpos = posfor _ in range(, howmany):b.insert(curpos, b[pos])pos += elif action == :n = random.choice([, , ])for _ in range(, n):if len(b) > pos+:tmp = b[pos]b[pos] = b[pos+]b[pos+] = tmppos += else:pos -= tmp = b[pos]b[pos] = b[pos+]b[pos+] = tmppos += elif action in [, ]:op = {: lambda x, y: ord(x) << y,: lambda x, y: ord(x) >> y,}n = random.choice([, , ])if len(b) < pos+n:pos = len(b) - (pos+n)if n == :f = \"\"s = op[action](b[pos], n) % elif n == :f = \"\"s = op[action](b[pos], n) % elif n == :f = \"\"s = op[action](b[pos], n) % val = struct.pack(f, s)for v in val:if isinstance(v, int):v = chr(v)b[pos] = vpos += elif action == :b.insert(random.randint(, len(b)-), random.choice([\"\", \"\", \"\", \"\", \"\", \"\", \"\"]))elif action == :del b[random.randint(, len(b)-)]elif action in [, ]:block = random.choice([(r\"\", r\"\"),(r\"\", r\"\"),(r\"\", r\"\")])b_str = self.safe_join(b)block_re = re.compile(str(\"\").format(block[], block[], block[], block[]))if block_re.search(b_str):r = random.choice(block_re.findall(b_str))random_re = re.compile(\"\".format(re.escape(r)))if random_re.search(b_str):if action == :newarr = list(random_re.sub(\"\", b_str))b[:] = newarrelse:newarr = list(random_re.sub(\"\" * random.randint(, ), b_str, ))b[:] = newarrelif action == :b_str = self.safe_join(b)limit_choice = random.choice([,-,,-,])block_re = re.compile(\"\")if block_re.search(b_str):block = random.choice([m for m in block_re.finditer(b_str)])new = b_str[:block.start()] + str(int(block.group())*limit_choice) + b_str[block.start() +len(block.group()):]b[:] = list(new)", "docstring": "Perform the actual fuzzing using random strategies", "id": "f5925:c0:m5"} {"signature": "def start(self):", "body": "from .pjf_worker import PJFWorkerworker = PJFWorker(self)if self.update_pjf:worker.update_library()elif 
self.browser_auto:worker.browser_autopwn()elif self.fuzz_web:worker.web_fuzzer()elif self.json:if not self.web_server and not self.ext_fuzz and not self.cmd_fuzz:worker.fuzz()elif self.ext_fuzz:if self.stdin:worker.fuzz_stdin()else:worker.fuzz_command_line()elif self.cmd_fuzz:if self.stdin:worker.fuzz_external(True)else:worker.fuzz_external()else:worker.start_http_server()elif self.json_file:worker.start_file_fuzz()elif self.process_to_monitor:worker.start_process_monitor()", "docstring": "Parse the command line and start PyJFuzz", "id": "f5926:c0:m3"} {"signature": "def __eq__(self, other):", "body": "return self.json == other", "docstring": "Check if two object are equal", "id": "f5931:c0:m3"} {"signature": "def __add__(self, other):", "body": "self.json.update(other)return self", "docstring": "Add keys to dictionary merging with another dictionary object", "id": "f5931:c0:m1"} {"signature": "@propertydef fuzzed(self):", "body": "try:if self.config.strong_fuzz:fuzzer = PJFMutators(self.config)if self.config.url_encode:if sys.version_info >= (, ):return urllib.parse.quote(fuzzer.fuzz(json.dumps(self.config.json)))else:return urllib.quote(fuzzer.fuzz(json.dumps(self.config.json)))else:if type(self.config.json) in [list, dict]:return fuzzer.fuzz(json.dumps(self.config.json))else:return fuzzer.fuzz(self.config.json)else:if self.config.url_encode:if sys.version_info >= (, ):return urllib.parse.quote(self.get_fuzzed(self.config.indent, self.config.utf8))else:return urllib.quote(self.get_fuzzed(self.config.indent, self.config.utf8))else:return self.get_fuzzed(self.config.indent, self.config.utf8)except Exception as e:raise PJFBaseException(e.message if hasattr(e, \"\") else str(e))", "docstring": "Get a printable fuzzed object", "id": "f5931:c0:m10"} {"signature": "def init_logger(self):", "body": "return PJFLogger.init_logger()", "docstring": "Init the default logger", "id": "f5931:c0:m9"} {"signature": "def __init__(self, *values, **kwargs):", "body": "super(Q, self).__init__(*values, **kwargs)self.escape = kwargs.setdefault(\"\", self.escape)self.html_js_escape = kwargs.setdefault(\"\", self.html_js_escape)self.quote = kwargs.setdefault(\"\", self.quote)", "docstring": "Create the new ``Quote`` instance\n\n :param bool escape: Whether or not quoted data should be escaped (default=``False``)\n :param bool html_js_escape: Whether or not quoted data should be html-javascript escaped (default=``False``)\n :param str quote: The quote character to be used if ``escape`` and ``html_js_escape`` are ``False``", "id": "f5940:c9:m0"} {"signature": "def __init__(self, *values, **kwargs):", "body": "self.shortest_vals = Noneself.values = list(values)if \"\" in kwargs and len(values) == :self.values = kwargs[\"\"]self.rolling = kwargs.setdefault(\"\", False)", "docstring": "Create a new ``Or`` instance with the provide values\n\n :param list values: The list of values to choose randomly from", "id": "f5940:c10:m0"} {"signature": "def __init__(self, refname, **kwargs):", "body": "self.refname = refnameself.cat = kwargs.setdefault(\"\", self.cat)self.failsafe = kwargs.setdefault(\"\", self.failsafe)self.fuzzer = GramFuzzer.instance()", "docstring": "Create a new ``Ref`` instance\n\n :param str refname: The name of the rule to reference\n :param str cat: The name of the category the rule is defined in", "id": "f5940:c13:m0"} {"signature": "def __init__(self, value=None, **kwargs):", "body": "super(String, self).__init__(value, **kwargs)self.charset = kwargs.setdefault(\"\", self.charset)", "docstring": 
"Create a new instance of the ``String`` field.\n\n :param value: The hard-coded value of the String field\n :param int min: The minimum size of the String when built\n :param int max: The maximum size of the String when built\n :param str charset: The character-set to be used when building the string", "id": "f5940:c6:m0"} {"signature": "def __init__(self, name, *values, **options):", "body": "self.name = nameself.options = optionsself.values = list(values)self.sep = self.options.setdefault(\"\", self.sep)self.cat = self.options.setdefault(\"\", self.cat)self.no_prune = self.options.setdefault(\"\", self.no_prune)self.fuzzer = GramFuzzer.instance()frame,mod_path,_,_,_,_ = inspect.stack()[]module_name = os.path.basename(mod_path).replace(\"\", \"\").replace(\"\", \"\")if \"\" in frame.f_locals:self.fuzzer.cat_group_defaults[module_name] = frame.f_locals[\"\"]self.fuzzer.add_definition(self.cat, self.name, self, no_prune=self.no_prune, gram_file=module_name)", "docstring": "Create a new rule definition. Simply instantiating a new rule definition\n will add it to the current ``GramFuzzer`` instance.\n\n :param str name: The name of the rule being defined\n :param list values: The list of values that define the value of the rule\n (will be concatenated when built)\n :param str cat: The category to create the rule in (default=``\"default\"``).\n :param bool no_prune: If this rule should not be pruned *EVEN IF* it is found to be\n unreachable (default=``False``)", "id": "f5940:c12:m0"} {"signature": "def data(length, charset):", "body": "return \"\".join(_choice(charset) for x in range(length))", "docstring": "Generate ``length`` random characters from charset ``charset``\n\n :param int length: The number of characters to randomly generate\n :param str charset: The charset of characters to choose from\n :returns: str", "id": "f5941:m4"} {"signature": "def set_max_recursion(self, level):", "body": "import gramfuzz.fieldsgramfuzz.fields.Ref.max_recursion = level", "docstring": "Set the maximum reference-recursion depth (not the Python system maximum stack\n recursion level). 
This controls how many levels deep of nested references are allowed\n before gramfuzz attempts to generate the shortest (reference-wise) rules possible.\n\n :param int level: The new maximum reference level", "id": "f5942:c0:m3"} {"signature": "def ceiling_division(numerator, denominator):", "body": "return -(-numerator // denominator)", "docstring": "Divide and round up", "id": "f5945:m3"} {"signature": "@contextmanagerdef working_directory(path):", "body": "prev_dir = os.getcwd()os.chdir(str(path))try:yieldfinally:os.chdir(prev_dir)", "docstring": "Change working directory and restore the previous on exit", "id": "f5945:m0"} {"signature": "def get_type_for_extra_dim(type_index):", "body": "try:return _extra_dims_style_1[type_index]except IndexError:raise errors.UnknownExtraType(type_index)", "docstring": "Returns the type str ('u1\" or \"u2\", etc) for the given type index\n Parameters\n ----------\n type_index: int\n index of the type as defined in the LAS Specification\n\n Returns\n -------\n str,\n a string representing the type, can be understood by numpy", "id": "f5972:m1"} {"signature": "def __setattr__(self, key, value):", "body": "if key in dims.DIMENSIONS or key in self.points_data.all_dimensions_names:self.points_data[key] = valueelse:super().__setattr__(key, value)", "docstring": "This is called on every access to an attribute of the instance.\n Again we use this to forward the call the the points record\n\n But this time checking if the key is actually a dimension name\n so that an error is raised if the user tries to set a valid\n LAS dimension even if it is not present in the field.\n eg: user tries to set the red field of a file with point format 0:\n an error is raised", "id": "f5974:c0:m11"} {"signature": "def write_to_file(self, filename, do_compress=None):", "body": "is_ext_laz = filename.split(\"\")[-] == \"\"if is_ext_laz and do_compress is None:do_compress = Truewith open(filename, mode=\"\") as out:self.write_to(out, do_compress=do_compress)", "docstring": "Writes the las data into a file\n\n Parameters\n ----------\n filename : str\n The file where the data should be written.\n do_compress: bool, optional, default None\n if None the extension of the filename will be used\n to determine if the data should be compressed\n otherwise the do_compress flag indicate if the data should be compressed", "id": "f5974:c0:m18"} {"signature": "def __getattr__(self, item):", "body": "return self.points_data[item]", "docstring": "Automatically called by Python when the attribute\n named 'item' is no found. We use this function to forward the call the\n point record. 
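The `__setattr__`/`__getattr__` pair above (f5974:c0:m11 and f5974:c0:m10) forwards LAS dimension names to an inner point record while leaving ordinary attributes alone. A simplified sketch of that forwarding mechanism only (the real record also validates the dimension against the point format); the `DIMENSIONS` set and the plain dict standing in for the point record are invented for illustration:

```python
class PointProxy:
    DIMENSIONS = {"X", "Y", "Z", "intensity"}  # assumed dimension names

    def __init__(self):
        # Bypass our own __setattr__ so the inner record exists before any forwarding.
        object.__setattr__(self, "points_data", {})

    def __getattr__(self, item):
        # Only called when normal lookup fails, i.e. for names stored in the record.
        try:
            return self.points_data[item]
        except KeyError:
            raise AttributeError(item)

    def __setattr__(self, key, value):
        if key in self.DIMENSIONS:
            self.points_data[key] = value            # dimension writes go to the record
        else:
            object.__setattr__(self, key, value)     # everything else is a normal attribute
```

After `p = PointProxy(); p.X = [1, 2, 3]`, the values live in `p.points_data` yet still read back via `p.X`.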
This is the mechanism used to allow the users to access\n the points dimensions directly through a LasData.\n\n Parameters\n ----------\n item: str\n name of the attribute, should be a dimension name\n\n Returns\n -------\n The requested dimension if it exists", "id": "f5974:c0:m10"} {"signature": "@propertydef points(self):", "body": "return self.points_data.array", "docstring": "returns the numpy array representing the points\n\n Returns\n -------\n the Numpy structured array of points", "id": "f5974:c0:m8"} {"signature": "@propertydef x(self):", "body": "return scale_dimension(self.X, self.header.x_scale, self.header.x_offset)", "docstring": "Returns the scaled x positions of the points as doubles", "id": "f5974:c0:m1"} {"signature": "def _raise_if_wrong_file_signature(stream):", "body": "file_sig = stream.read(len(headers.LAS_FILE_SIGNATURE))if file_sig != headers.LAS_FILE_SIGNATURE:raise errors.PylasError(\"\".format(file_sig, headers.LAS_FILE_SIGNATURE))", "docstring": "Reads the 4 first bytes of the stream to check that is LASF", "id": "f5976:m0"} {"signature": "def read_vlrs(self):", "body": "self.stream.seek(self.start_pos + self.header.size)return VLRList.read_from(self.stream, num_to_read=self.header.number_of_vlr)", "docstring": "Reads and return the vlrs of the file", "id": "f5976:c0:m2"} {"signature": "def read(self):", "body": "vlrs = self.read_vlrs()self._warn_if_not_at_expected_pos(self.header.offset_to_point_data, \"\", \"\")self.stream.seek(self.start_pos + self.header.offset_to_point_data)try:points = self._read_points(vlrs)except (RuntimeError, errors.LazPerfNotFound) as e:logger.error(\"\".format(e))self.stream.seek(self.start_pos)self.__init__(io.BytesIO(laszip_decompress(self.stream)))return self.read()if points.point_format.has_waveform_packet:self.stream.seek(self.start_pos + self.header.start_of_waveform_data_packet_record)if self.header.global_encoding.are_waveform_flag_equal():raise errors.PylasError(\"\".format(\"\"if self.header.global_encoding.waveform_internalelse \"\"))if self.header.global_encoding.waveform_internal:_, _ = self._read_internal_waveform_packet()elif self.header.global_encoding.waveform_external:logger.info(\"\")if self.header.version >= \"\":evlrs = self.read_evlrs()return las14.LasData(header=self.header, vlrs=vlrs, points=points, evlrs=evlrs)return las12.LasData(header=self.header, vlrs=vlrs, points=points)", "docstring": "Reads the whole las data (header, vlrs ,points, etc) and returns a LasData\n object", "id": "f5976:c0:m3"} {"signature": "@classmethoddef empty(cls, point_format):", "body": "return cls.zeros(point_format, point_count=)", "docstring": "Creates an empty point record.\n\n Parameters\n ----------\n point_format: pylas.PointFormat\n The point format id the point record should have\n\n Returns\n -------\n PackedPointRecord", "id": "f5977:c2:m4"} {"signature": "@propertydef point_size(self):", "body": "return self.array.dtype.itemsize", "docstring": "Returns the point size in bytes taken by each points of the record\n\n Returns\n -------\n int\n The point size in byte", "id": "f5977:c2:m2"} {"signature": "@propertydef extra_dimension_names(self):", "body": "return [extd[] for extd in self.extra_dims]", "docstring": "Returns the list of extra dimensions attached to this point format", "id": "f5980:c0:m6"} {"signature": "def __init__(self, point_format_id, extra_dims=None):", "body": "self.id = point_format_idif extra_dims is None:extra_dims = []self.extra_dims = extra_dims", "docstring": 
"Parameters\n----------\npoint_format_id: int\n point format id\nextra_dims: list of tuple\n [(name, type_str), ..] of extra dimensions attached to this point format", "id": "f5980:c0:m0"} {"signature": "@propertydef composed_fields(self):", "body": "return self._access_dict(dims.COMPOSED_FIELDS, self.id)", "docstring": "Returns the dict of composed fields defined for the point format\n\n Returns\n -------\n Dict[str, List[SubFields]]\n maps a composed field name to its sub_fields", "id": "f5980:c0:m4"} {"signature": "@mins.setterdef mins(self, value):", "body": "self.x_min, self.y_min, self.z_min = value", "docstring": "Sets de minimum values of x, y, z as a numpy array", "id": "f5984:c2:m17"} {"signature": "@point_format_id.setterdef point_format_id(self, value):", "body": "self._point_data_format_id = value", "docstring": "Returns the point format id of the points", "id": "f5984:c2:m10"} {"signature": "@propertydef date(self):", "body": "try:return datetime.date(self.creation_year, , ) + datetime.timedelta(self.creation_day_of_year - )except ValueError:return None", "docstring": "Returns the creation date stored in the las file\n\n Returns\n -------\n datetime.date", "id": "f5984:c2:m7"} {"signature": "@propertydef mins(self):", "body": "return np.array([self.x_min, self.y_min, self.z_min])", "docstring": "Returns de minimum values of x, y, z as a numpy array", "id": "f5984:c2:m16"} {"signature": "def files_have_same_dtype(las_files):", "body": "dtypes = {las.points.dtype for las in las_files}return len(dtypes) == ", "docstring": "Returns true if all the files have the same numpy datatype", "id": "f5986:m2"} {"signature": "@staticmethod@abstractmethoddef official_record_ids():", "body": "pass", "docstring": "Shall return the official record_id for the VLR\n\n .. 
note::\n\n Even if the VLR has one record_id, the return type must be a tuple\n\n Returns\n -------\n tuple of int\n The record_ids this VLR type can have", "id": "f5988:c0:m1"} {"signature": "@staticmethod@abstractmethoddef official_user_id():", "body": "pass", "docstring": "Shall return the official user_id as described in the documentation", "id": "f5988:c0:m0"} {"signature": "@classmethoddef read_from(cls, data_stream, num_to_read):", "body": "vlrlist = cls()for _ in range(num_to_read):raw = RawVLR.read_from(data_stream)try:vlrlist.append(vlr_factory(raw))except UnicodeDecodeError:logger.error(\"\".format(raw))return vlrlist", "docstring": "Reads vlrs and parse them if possible from the stream\n\n Parameters\n ----------\n data_stream : io.BytesIO\n stream to read from\n num_to_read : int\n number of vlrs to be read\n\n Returns\n -------\n pylas.vlrs.vlrlist.VLRList\n List of vlrs", "id": "f5991:c1:m13"} {"signature": "def append(self, vlr):", "body": "self.vlrs.append(vlr)", "docstring": "append a vlr to the list\n\n Parameters\n ----------\n vlr: RawVlR or VLR or KnownVlr\n\n Returns\n -------", "id": "f5991:c1:m1"} {"signature": "def delete(self, resource_id):", "body": "self.logger.debug(''.format(resource_id))if self.driver._es.exists(index=self.driver._index,id=resource_id,doc_type='') == False:raise ValueError(\"\".format(resource_id))return self.driver._es.delete(index=self.driver._index,id=resource_id,doc_type='')", "docstring": "Delete an object from elasticsearch.\n :param resource_id: id of the object to be deleted.\n :return:", "id": "f6004:c0:m5"} {"signature": "def update(self, obj, resource_id):", "body": "self.logger.debug(''.format(resource_id))return self.driver._es.index(index=self.driver._index,id=resource_id,body=obj,doc_type='',refresh='')['']", "docstring": "Update object in elasticsearch using the resource_id.\n :param metadata: new metadata for the transaction.\n :param resource_id: id of the object to be updated.\n :return: id of the object.", "id": "f6004:c0:m4"} {"signature": "def query(self, search_model: QueryModel):", "body": "query_parsed = query_parser(search_model.query)self.logger.debug(f'')if search_model.sort is not None:self._mapping_to_sort(search_model.sort.keys())sort = self._sort_object(search_model.sort)else:sort = [{\"\": \"\"}]if search_model.query == {}:query = {'': {}}else:query = query_parsed[]body = {'': query,'': sort,'': (search_model.page - ) * search_model.offset,'': search_model.offset,}page = self.driver._es.search(index=self.driver._index,doc_type='',body=body,q=query_parsed[])object_list = []for x in page['']['']:object_list.append(x[''])return object_list", "docstring": "Query elasticsearch for objects.\n :param search_model: object of QueryModel.\n :return: list of objects that match the query.", "id": "f6004:c0:m7"} {"signature": "@propertydef type(self):", "body": "return ''", "docstring": "str: the type of this plugin (``'Elasticsearch'``)", "id": "f6004:c0:m1"} {"signature": "def text_query(self, search_model: FullTextModel):", "body": "self.logger.debug(''.format(search_model.text))if search_model.sort is not None:self._mapping_to_sort(search_model.sort.keys())sort = self._sort_object(search_model.sort)else:sort = [{\"\": \"\"}]body = {'': sort,'': (search_model.page - ) * search_model.offset,'': search_model.offset,}page = self.driver._es.search(index=self.driver._index,doc_type='',body=body,q=search_model.text)object_list = []for x in page['']['']:object_list.append(x[''])return object_list", "docstring": "Query 
elasticsearch for objects.\n :param search_model: object of FullTextModel\n :return: list of objects that match the query.", "id": "f6004:c0:m8"} {"signature": "def read(self, resource_id):", "body": "self.logger.debug(''.format(resource_id))return self.driver._es.get(index=self.driver._index,id=resource_id,doc_type='')['']", "docstring": "Read object in elasticsearch using the resource_id.\n :param resource_id: id of the object to be read.\n :return: object value from elasticsearch.", "id": "f6004:c0:m3"} {"signature": "def reduceCnf(cnf):", "body": "output = Cnf()for x in cnf.dis:dont_add = Falsefor y in x:for z in x:if z == -y:dont_add = Truebreakif dont_add: breakif dont_add: continueif x not in output.dis:output.dis |= frozenset([x])return output", "docstring": "I just found a remarkably large bug in my SAT solver and found an\ninteresting solution.\nRemove all b | -b\n(-b | b) & (b | -a) & (-b | a) & (a | -a)\nbecomes\n(b | -a) & (-b | a)\n\nRemove all (-e) & (-e)\n(-e | a) & (-e | a) & (-e | a) & (-e | a)\nbecomes\n(-e | a)\n(-b | b | c) becomes nothing, not (c)", "id": "f6009:m0"} {"signature": "def Bin(self):", "body": "err = _Bin(self.transit, self.limbdark, self.settings, self.arrays)if err != _ERR_NONE: RaiseError(err)", "docstring": "Bins the light curve model to the provided time array", "id": "f6024:c4:m4"} {"signature": "@propertydef duration(self):", "body": "ecc = self.ecc if not np.isnan(self.ecc) else np.sqrt(self.ecw** + self.esw**)esw = self.esw if not np.isnan(self.esw) else ecc * np.sin(self.w)aRs = ((G * self.rhos * ( + self.MpMs) * (self.per * DAYSEC)**) / ( * np.pi))**(/)inc = np.arccos(self.bcirc/aRs)becc = self.bcirc * ( - ecc**)/( - esw)tdur = self.per / / np.pi * np.arcsin((( + self.RpRs)** -becc**)** / (np.sin(inc) * aRs))tdur *= np.sqrt( - ecc**)/( - esw)return tdur", "docstring": "The approximate transit duration for the general case of an eccentric orbit", "id": "f6024:c0:m4"} {"signature": "def __del__(self):", "body": "self.Free()", "docstring": "Free the C arrays when the last reference to the class goes out of scope!", "id": "f6024:c4:m6"} {"signature": "def Free(self):", "body": "if self.arrays._calloc:_dbl_free(self.arrays._time)_dbl_free(self.arrays._flux)_dbl_free(self.arrays._bflx)_dbl_free(self.arrays._M)_dbl_free(self.arrays._E)_dbl_free(self.arrays._f)_dbl_free(self.arrays._r)_dbl_free(self.arrays._x)_dbl_free(self.arrays._y)_dbl_free(self.arrays._z)self.arrays._calloc = if self.arrays._balloc: _dbl_free(self.arrays._b)self.arrays._balloc = if self.arrays._ialloc:_dbl_free(self.arrays._iarr)self.arrays._ialloc = ", "docstring": "Frees the memory used by all of the dynamically allocated C arrays.", "id": "f6024:c4:m5"} {"signature": "def create_validator(data_struct_dict, name=None):", "body": "if name is None:name = ''attrs = {}for field_name, field_info in six.iteritems(data_struct_dict):field_type = field_info['']if field_type == DictField.FIELD_TYPE_NAME and isinstance(field_info.get(''), dict):field_info[''] = create_validator(field_info[''])attrs[field_name] = create_field(field_info)name = force_str(name)return type(name, (Validator, ), attrs)", "docstring": "create a Validator instance from data_struct_dict\n\n:param data_struct_dict: a dict describe validator's fields, like the dict `to_dict()` method returned.\n:param name: name of Validator class \n\n:return: Validator instance", "id": "f6048:m0"} {"signature": "def __init__(self, raw_data):", "body": "assert isinstance(raw_data, dict), ''.format(type(raw_data).__name__)self.raw_data 
= raw_dataself.validated_data = Noneself.errors = {}", "docstring": ":param raw_data: unvalidate data", "id": "f6048:c1:m0"} {"signature": "@classmethoddef to_dict(cls):", "body": "d = dict()for name, field in six.iteritems(cls._FIELDS_MAP):field_info = field.to_dict()d[name] = field_inforeturn d", "docstring": "format Validator to dict", "id": "f6048:c1:m5"} {"signature": "def validate(self, data):", "body": "return data", "docstring": "model-level validate.\nsub-class can override this method to validate data, return modified data", "id": "f6048:c1:m3"} {"signature": "@classmethoddef _get_all_params(cls):", "body": "params = list(cls.PARAMS)bases = cls.__bases__for base in bases:if issubclass(base, BaseField):params.extend(base._get_all_params())return params", "docstring": "Collect all PARAMS from this class and its parent class.", "id": "f6050:c2:m4"} {"signature": "def validate(self, value):", "body": "value = self._validate(value)for v in self.validators:v(value)return value", "docstring": "return validated value or raise FieldValidationError.", "id": "f6050:c2:m5"} {"signature": "def to_service(self, service, version):", "body": "service_url = self._service_locator.get_service_url(service, version)return self.__copy_and_set('', self.__strip_trailing_slashes(service_url))", "docstring": "Sets the service name and version the request should target\n\n Args:\n service (str): The name of the service as displayed in the services.json file\n version (str): The version of the service as displayed in the services.json file\n\n Returns:\n The request builder instance in order to chain calls", "id": "f6064:c0:m4"} {"signature": "def delete(self):", "body": "return self.__send('')", "docstring": "Sends the request as parametrized with the DELETE verb\n\n Returns:\n The response object or body depending of the parametrization\n\n Raises:\n Any exception parametrized with the `throw` method", "id": "f6064:c0:m18"} {"signature": "def get(self):", "body": "return self.__send('')", "docstring": "Sends the request as parametrized with the GET verb\n\n Returns:\n The response object or body depending of the parametrization\n\n Raises:\n Any exception parametrized with the `throw` method", "id": "f6064:c0:m16"} {"signature": "def post(self):", "body": "return self.__send('')", "docstring": "Sends the request as parametrized with the POST verb\n\n Returns:\n The response object or body depending of the parametrization\n\n Raises:\n Any exception parametrized with the `throw` method", "id": "f6064:c0:m17"} {"signature": "def put(self):", "body": "return self.__send('')", "docstring": "Sends the request as parametrized with the PUT verb\n\n Returns:\n The response object or body depending of the parametrization\n\n Raises:\n Any exception parametrized with the `throw` method", "id": "f6064:c0:m19"} {"signature": "def to_url(self, url):", "body": "return self.__copy_and_set('', url)", "docstring": "Sets the request target url\n\n Args:\n url (str): The url the request should be targeted to\n\n Returns:\n The request builder instance in order to chain calls", "id": "f6064:c0:m3"} {"signature": "def to_endpoint(self, endpoint):", "body": "return self.__copy_and_set('', self.__strip_leading_slashes(endpoint))", "docstring": "Sets the endpoint of the service the request should target\n\n Args:\n endpoint (str): The endpoint that will be concatenated to the service url\n\n Returns:\n The request builder instance in order to chain calls", "id": "f6064:c0:m5"} {"signature": "@classmethoddef new(cls, 
access_token, environment=''):", "body": "return cls(storage_client=StorageClient.new(access_token, environment=environment))", "docstring": "Creates a new cross-service client.", "id": "f6066:c0:m1"} {"signature": "def list(self, path):", "body": "self.__validate_storage_path(path)entity = self.api_client.get_entity_by_query(path=path)if entity[''] not in self.__BROWSABLE_TYPES:raise StorageArgumentException(''''.format(entity['']))entity_uuid = entity['']file_names = []more_pages = Truepage_number = while more_pages:response = self.api_client.list_folder_content(entity_uuid, page=page_number, ordering='')more_pages = response[''] is not Nonepage_number += for child in response['']:pattern = '' if child[''] == '' else ''file_names.append(pattern.format(name=child['']))return file_names", "docstring": "List the entities found directly under the given path.\n\n Args:\n path (str): The path of the entity to be listed. Must start with a '/'.\n\n Returns:\n The list of entity names directly under the given path:\n\n u'/12345/folder_1'\n\n Raises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "id": "f6068:c0:m2"} {"signature": "def __init__(self, services_url):", "body": "self.__services_url = services_url", "docstring": "Create new service locator\n Arguments:\n services_url: The url of the json file where the services url are defined\n Returns:\n A service locator instance", "id": "f6070:c0:m0"} {"signature": "def get_service_url(self, service, version):", "body": "return self.__get_services()[service][version]", "docstring": "Get the service URL\n Arguments:\n service: The service name\n version: The service version\n Returns:\n The URL where the service is located", "id": "f6070:c0:m2"} {"signature": "def delete_file(self, file_id):", "body": "if not is_valid_uuid(file_id):raise StorageArgumentException(''.format(file_id))self._authenticated_request.to_endpoint(''.format(file_id)).delete()", "docstring": "Delete a file.\n\n Args:\n file_id (str): The UUID of the file to delete.\n\n Returns:\n None\n\n Raises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "id": "f6071:c0:m26"} {"signature": "def delete_folder(self, folder):", "body": "if not is_valid_uuid(folder):raise StorageArgumentException(''.format(folder))self._authenticated_request.to_endpoint(''.format(folder)).delete()", "docstring": "Delete a folder. 
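The storage `list` record above (f6068:c0:m2) pages through a folder listing until the server reports no further page. A sketch of the same loop against a hypothetical client whose responses follow the paged shape documented later in this section (`next`, `results`, `entity_type`, `name`):

```python
def list_names(client, folder_uuid):
    """Collect the names of all direct children, one page at a time."""
    names = []
    page_number = 1
    more_pages = True
    while more_pages:
        response = client.list_folder_content(folder_uuid, page=page_number, ordering="name")
        more_pages = response["next"] is not None   # the server signals whether more pages exist
        page_number += 1
        for child in response["results"]:
            suffix = "/" if child["entity_type"] == "folder" else ""  # trailing slash is an assumed folder marker
            names.append(child["name"] + suffix)
    return names
```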
It will recursively delete all the content.\n\n Args:\n folder_id (str): The UUID of the folder to be deleted.\n\n Returns:\n None\n\n Raises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: 403\n StorageNotFoundException: 404\n HTTPError: other non-20x error codes", "id": "f6071:c0:m19"} {"signature": "def create_project(self, collab_id):", "body": "return self._authenticated_request.to_endpoint('').with_json_body(self._prep_params(locals())).return_body().post()", "docstring": "Create a new project.\n\n Args:\n collab_id (int): The id of the collab the project should be created in.\n\n Returns:\n A dictionary of details of the created project::\n\n {\n u'collab_id': 12998,\n u'created_by': u'303447',\n u'created_on': u'2017-03-21T14:06:32.293902Z',\n u'description': u'',\n u'entity_type': u'project',\n u'modified_by': u'303447',\n u'modified_on': u'2017-03-21T14:06:32.293967Z',\n u'name': u'12998',\n u'uuid': u'2516442e-1e26-4de1-8ed8-94523224cc40'\n }\n\n Raises:\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "id": "f6071:c0:m14"} {"signature": "def get_entity_collab_id(self, entity_id):", "body": "if not is_valid_uuid(entity_id):raise StorageArgumentException(''.format(entity_id))return self._authenticated_request.to_endpoint(''.format(entity_id)).return_body().get()[\"\"]", "docstring": "Retrieve entity Collab ID.\n\n Args:\n entity_id (str): The UUID of the requested entity.\n\n Returns:\n The id as interger of the Collebaration to which the entity belongs\n\n Raises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "id": "f6071:c0:m5"} {"signature": "def download_file_content(self, file_id, etag=None):", "body": "if not is_valid_uuid(file_id):raise StorageArgumentException(''.format(file_id))headers = {'': ''}if etag:headers[''] = etagresp = self._authenticated_request.to_endpoint(''.format(file_id)).with_headers(headers).get()if resp.status_code == :return (None, None)if '' not in resp.headers:raise StorageException('')return (resp.headers[''], resp.content)", "docstring": "Download file content.\n\n Args:\n file_id (str): The UUID of the file whose content is requested\n etag (str): If the content is not changed since the provided ETag,\n the content won't be downloaded. If the content is changed, it\n will be downloaded and returned with its new ETag.\n\n Note:\n ETags should be enclosed in double quotes::\n\n my_etag = '\"71e1ed9ee52e565a56aec66bc648a32c\"'\n\n\n Returns:\n A tuple of ETag and content (etag, content) if the content was\n retrieved. 
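`download_file_content` above (f6071:c0:m24) skips downloading the body when the supplied ETag still matches. The same If-None-Match / 304 handshake can be sketched with plain `requests`; the URL handling here is a placeholder, not the client's actual endpoint:

```python
import requests

def download_if_changed(url, etag=None):
    """Return (new_etag, content), or (None, None) when the cached ETag still matches."""
    headers = {}
    if etag:
        headers["If-None-Match"] = etag          # ETags stay wrapped in double quotes
    resp = requests.get(url, headers=headers)
    if resp.status_code == 304:                  # content unchanged since `etag`
        return None, None
    resp.raise_for_status()
    return resp.headers.get("ETag"), resp.content
```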
If an etag was provided, and content didn't change\n returns (None, None)::\n\n ('\"71e1ed9ee52e565a56aec66bc648a32c\"', 'Hello world!')\n\n Raises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "id": "f6071:c0:m24"} {"signature": "@staticmethoddef _prep_params(params):", "body": "return {k: v for (k, v) in params.items() if v is not None and k != ''}", "docstring": "Remove empty (None) valued keywords and self from function parameters", "id": "f6071:c0:m2"} {"signature": "def get_metadata(self, entity_type, entity_id):", "body": "if not is_valid_uuid(entity_id):raise StorageArgumentException(''.format(entity_id))return self._authenticated_request.to_endpoint(''.format(entity_type, entity_id)).return_body().get()", "docstring": "Get metadata of an entity.\n\n Args:\n entity_type (str): Type of the entity. Admitted values: ['project',\n 'folder', 'file'].\n entity_id (str): The UUID of the entity to be modified.\n\n Returns:\n A dictionary of the metadata::\n\n {\n u'bar': u'200',\n u'foo': u'100'\n }\n\n Raises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "id": "f6071:c0:m8"} {"signature": "def copy_file_content(self, file_id, source_file):", "body": "if not is_valid_uuid(file_id):raise StorageArgumentException(''.format(file_id))if not is_valid_uuid(source_file):raise StorageArgumentException(''.format(source_file))self._authenticated_request.to_endpoint(''.format(file_id)).with_headers({'': source_file}).put()", "docstring": "Copy file content from source file to target file.\n\n Args:\n file_id (str): The UUID of the file whose content is written.\n source_file (str): The UUID of the file whose content is copied.\n\n Returns:\n None\n\n Raises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "id": "f6071:c0:m23"} {"signature": "def list_folder_content(self, folder, name=None, entity_type=None,content_type=None, page_size=DEFAULT_PAGE_SIZE,page=None, ordering=None):", "body": "if not is_valid_uuid(folder):raise StorageArgumentException(''.format(folder))params = self._prep_params(locals())del params[''] return self._authenticated_request.to_endpoint(''.format(folder)).with_params(params).return_body().get()", "docstring": "List files and folders (not recursively) contained in the folder.\n\n This function does not retrieve all results, pages have\n to be manually retrieved by the caller.\n\n Args:\n folder (str): The UUID of the requested folder.\n name (str): Optional filter on entity name.\n entity_type (str): Optional filter on entity type.\n Admitted values: ['file', 'folder'].\n content_type (str): Optional filter on entity content type (only\n files are returned).\n page_size (int): Number of elements per page.\n page (int): Number of the page.\n ordering (str): Indicate on which fields to sort the result. Prepend\n '-' to invert order. 
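`_prep_params` above (f6071:c0:m2) strips unset keyword arguments and `self` before they are forwarded as query parameters. The idiom is tiny but worth spelling out:

```python
def _prep_params(params):
    # Keep only keyword arguments that were actually supplied, and drop `self`.
    return {k: v for k, v in params.items() if v is not None and k != "self"}

# Inside a method this is typically fed `locals()`, so a call such as
# list_folder_content(folder, ordering="name") yields {'folder': ..., 'ordering': 'name'}.
```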
Multiple values can be provided.\n Ordering is supported on: ['name', 'created_on', 'modified_on'].\n Example: 'ordering=name,created_on'\n\n Returns:\n A dictionary of the results::\n\n {\n u'count': 1,\n u'next': None,\n u'previous': None,\n u'results': [{u'content_type': u'plain/text',\n u'created_by': u'303447',\n u'created_on': u'2017-03-13T10:17:01.688472Z',\n u'description': u'',\n u'entity_type': u'file',\n u'modified_by': u'303447',\n u'modified_on': u'2017-03-13T10:17:01.688632Z',\n u'name': u'file_1',\n u'parent': u'eac11058-4ae0-4ea9-ada8-d3ea23887509',\n u'uuid': u'0e17eaac-cb00-4336-b9d7-657026844281'}]\n }\n\n Raises:\n StorageArgumentException: Invalid arguments\n StorageForbiddenException: Server response code 403\n StorageNotFoundException: Server response code 404\n StorageException: other 400-600 error codes", "id": "f6071:c0:m18"} {"signature": "def decode_cmd_out(self, completed_cmd):", "body": "try:stdout = completed_cmd.stdout.encode('').decode()except AttributeError:try:stdout = str(bytes(completed_cmd.stdout), '').strip()except AttributeError:stdout = str(bytes(completed_cmd.stdout).decode('')).strip()try:stderr = completed_cmd.stderr.encode('').decode()except AttributeError:try:stderr = str(bytes(completed_cmd.stderr), '').strip()except AttributeError:stderr = str(bytes(completed_cmd.stderr).decode('')).strip()return ParsedCompletedCommand(completed_cmd.returncode,completed_cmd.args,stdout,stderr)", "docstring": "return a standard message", "id": "f6075:c0:m2"} {"signature": "def __init_cache_file_if_needed(self):", "body": "exp_cache_file = self.get_data_path(self.name, self.version)if not os.path.isdir(exp_cache_file):os.makedirs(exp_cache_file)", "docstring": "Inits a file that we log historical experiments\n:return:", "id": "f6083:c0:m3"} {"signature": "def __get_hopt_params(self, trial):", "body": "params = []for k in trial.__dict__:v = trial.__dict__[k]if v is None or v == False:continueif self.__should_escape(v):cmd = ''.format(k, v)else:cmd = ''.format(k, v)params.append(cmd)params.append(''.format(HyperOptArgumentParser.TRIGGER_CMD))full_cmd = ''.join(params)return full_cmd", "docstring": "Turns hopt trial into script params\n:param trial:\n:return:", "id": "f6087:c1:m12"} {"signature": "def run_setup(script_name, script_args=None, stop_after=\"\"):", "body": "if stop_after not in ('', '', '', ''):raise ValueError(\"\" % stop_after)core._setup_stop_after = stop_aftersave_argv = sys.argvglocals = copy(globals())glocals[''] = script_nameglocals[''] = \"\"try:try:sys.argv[] = script_nameif script_args is not None:sys.argv[:] = script_argsf = open(script_name)try:exec(f.read(), glocals, glocals)finally:f.close()finally:sys.argv = save_argvcore._setup_stop_after = Noneexcept Exception:logging.warn(\"\", exc_info=True)if core._setup_distribution is None:raise RuntimeError(\"\"\"\" %script_name)return core._setup_distribution", "docstring": "Run a setup script in a somewhat controlled environment, and\n return the Distribution instance that drives things. This is useful\n if you need to find out the distribution meta-data (passed as\n keyword args from 'script' to 'setup()', or the contents of the\n config files or command-line.\n\n 'script_name' is a file that will be run with 'execfile()';\n 'sys.argv[0]' will be replaced with 'script' for the duration of the\n call. 
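`__get_hopt_params` above (f6087:c1:m12) turns a trial's hyperparameters into command-line flags, quoting values that need escaping and skipping options set to None or False. A rough sketch of that conversion; the `--key=value` flag style and the characters treated as needing quotes are assumptions:

```python
def to_cli_flags(trial_params):
    """Render a dict of hyperparameters as a flag string for a training script."""
    parts = []
    for key, value in trial_params.items():
        if value is None or value is False:
            continue                                 # unset / disabled options are omitted
        text = str(value)
        needs_quotes = any(ch in text for ch in ' ;|&')
        parts.append('--{}="{}"'.format(key, text) if needs_quotes
                     else '--{}={}'.format(key, text))
    return " ".join(parts)

# to_cli_flags({"lr": 0.01, "optimizer": "adam", "debug": False})
# -> '--lr=0.01 --optimizer=adam'
```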
'script_args' is a list of strings; if supplied,\n 'sys.argv[1:]' will be replaced by 'script_args' for the duration of\n the call.\n\n 'stop_after' tells 'setup()' when to stop processing; possible\n values:\n init\n stop after the Distribution instance has been created and\n populated with the keyword arguments to 'setup()'\n config\n stop after config files have been parsed (and their data\n stored in the Distribution instance)\n commandline\n stop after the command-line ('sys.argv[1:]' or 'script_args')\n have been parsed (and the data stored in the Distribution)\n run [default]\n stop after all commands have been run (the same as if 'setup()'\n had been called in the usual way\n\n Returns the Distribution instance, which provides all information\n used to drive the Distutils.", "id": "f6103:m0"} {"signature": "def valid_set(self):", "body": "", "docstring": ":rtype: list of tuple", "id": "f6118:c0:m1"} {"signature": "def train_size(self):", "body": "train_set = self.train_set()if isinstance(train_set, collections.Iterable):return len(list(train_set))else:return None", "docstring": "Return size of training data. (optional)\n:rtype: number", "id": "f6118:c0:m3"} {"signature": "def pad_dataset(subset, side=\"\", length=-):", "body": "assert length == - or length > if type(subset[][][]) in [float, int, np.int64, np.int32, np.float32]:return _pad_2d(subset, side, length)else:return _pad_3d(subset, side, length)", "docstring": "Pad data set to specified length.\nParameters:\n length - max length, a just to the max length in the batch if length is -1", "id": "f6120:m0"} {"signature": "def monitor_layer_outputs(self):", "body": "for layer, hidden in zip(self.layers, self._hidden_outputs):self.training_monitors.append(('' % (layer.name), abs(hidden).mean()))", "docstring": "Monitoring the outputs of each layer.\nUseful for troubleshooting convergence problems.", "id": "f6129:c0:m7"} {"signature": "def load_params(self, path, exclude_free_params=False):", "body": "if not os.path.exists(path): return;logging.info(\"\" % path)if exclude_free_params:params_to_load = self.parameterselse:params_to_load = self.all_parametersif path.endswith(\"\"):opener = gzip.open if path.lower().endswith('') else openhandle = opener(path, '')saved_params = pickle.load(handle)handle.close()for target, source in zip(params_to_load, saved_params):logging.info('', target.name, source.shape)target.set_value(source)elif path.endswith(\"\"):arrs = np.load(path)for target, idx in zip(params_to_load, range(len(arrs.keys()))):source = arrs['' % idx]logging.info('', target.name, source.shape)target.set_value(source)else:raise Exception(\"\" % path)self.train_logger.load(path)", "docstring": "Load parameters from file.", "id": "f6129:c0:m17"} {"signature": "def register(self, *layers):", "body": "for layer in layers:self.register_layer(layer)", "docstring": "Register multiple layers as the components of the network.\nThe parameter of those layers will be trained.\nBut the output of the layer will not be stacked.", "id": "f6129:c0:m2"} {"signature": "def stack(self, *layers):", "body": "for layer in layers:self.stack_layer(layer)return self", "docstring": "Stack layers.", "id": "f6129:c0:m5"} {"signature": "def register_layer(self, layer):", "body": "if type(layer) == Block:layer.fix()self.parameter_count += 
layer.parameter_countself.parameters.extend(layer.parameters)self.free_parameters.extend(layer.free_parameters)self.training_monitors.extend(layer.training_monitors)self.testing_monitors.extend(layer.testing_monitors)self.updates.extend(layer.updates)self.training_updates.extend(layer.training_updates)self.input_variables.extend(layer.external_inputs)self.target_variables.extend(layer.external_targets)self.training_callbacks.extend(layer.training_callbacks)self.testing_callbacks.extend(layer.testing_callbacks)self.epoch_callbacks.extend(layer.epoch_callbacks)", "docstring": "Register the layer so that it's param will be trained.\nBut the output of the layer will not be stacked.", "id": "f6129:c0:m3"} {"signature": "@propertydef cost(self):", "body": "return T.constant()", "docstring": "Return cost variable.", "id": "f6129:c0:m14"} {"signature": "def report(self):", "body": "logging.info(\"\", \"\".join(map(str, self.input_variables)))logging.info(\"\", \"\".join(map(str, self.target_variables)))logging.info(\"\", \"\".join(map(str, self.all_parameters)))logging.info(\"\", self.parameter_count)", "docstring": "Print network statistics.", "id": "f6129:c0:m18"} {"signature": "def stack_encoders(self, *layers):", "body": "self.stack(*layers)self.encoding_layes.extend(layers)", "docstring": "Stack encoding layers, this must be done before stacking decoding layers.", "id": "f6130:c0:m4"} {"signature": "def stack_decoders(self, *layers):", "body": "self.stack(*layers)self.decoding_layers.extend(layers)", "docstring": "Stack decoding layers.", "id": "f6130:c0:m5"} {"signature": "def optimize_function(params, config=None):", "body": "gs = [dim_to_var(p.ndim) for p in params]updates, _ = optimize_updates(params, gs, config)return theano.function(gs, [], updates=updates)", "docstring": "Create a optimizing function receives gradients.\nParameters:\n params - parameters\n config - training configuration\nReturns:\n updating function receives gradients", "id": "f6133:m1"} {"signature": "def __init__(self, patience=, anneal_times=):", "body": "self._iter = self._annealed_iter = self._patience = patienceself._anneal_times = anneal_timesself._annealed_times = self._learning_rate = if type(self._learning_rate) == float:raise Exception(\"\")", "docstring": ":type trainer: deepy.trainers.base.NeuralTrainer", "id": "f6134:c0:m0"} {"signature": "def momentum_core(params, gradients, momentum=, learning_rate=):", "body": "free_parameters = []updates = []for param, grad in zip(params, gradients):delta = learning_rate * gradvelocity = theano.shared(np.zeros_like(param.get_value()), name=param.name + '')updates.append((velocity, momentum * velocity - delta))updates.append((param, param + velocity))free_parameters.append(velocity)return updates, free_parameters", "docstring": "Momentum SGD optimization core.", "id": "f6141:m0"} {"signature": "def report(self, score_map, type=\"\", epoch=-, new_best=False):", "body": "type_str = typeif len(type_str) < :type_str += \"\" * ( - len(type_str))info = \"\".join(\"\" % el for el in score_map.items())current_epoch = epoch if epoch > else self.current_epoch()epoch_str = \"\".format(current_epoch + )if epoch < :epoch_str = \"\"sys.stdout.write(\"\")sys.stdout.flush()marker = \"\" if new_best else \"\"message = \"\".format(type_str, epoch_str, info, marker)self.network.train_logger.record(message)logging.info(message)", "docstring": "Report the scores and record them in the log.", "id": "f6142:c0:m19"} {"signature": "def _run_train(self, epoch, train_set, train_size=None):", 
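`momentum_core` above (f6141:m0) is the classic momentum-SGD update: a per-parameter velocity buffer is decayed, pushed along the negative gradient, and added to the parameter. A NumPy sketch of the same two update rules, outside Theano:

```python
import numpy as np

def momentum_step(params, grads, velocities, momentum=0.9, learning_rate=0.01):
    """In-place update: v <- momentum * v - lr * grad ; p <- p + v."""
    for p, g, v in zip(params, grads, velocities):
        v *= momentum
        v -= learning_rate * g
        p += v

# `velocities` start as np.zeros_like(p), mirroring the shared velocity buffers above.
```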
"body": "self.network.train_logger.record_epoch(epoch + )costs = self.train_step(train_set, train_size)if not epoch % self.config.monitor_frequency:self.report(dict(costs), \"\", epoch)self.last_run_costs = costsreturn costs", "docstring": "Run one training iteration.", "id": "f6142:c0:m16"} {"signature": "def add_iter_controllers(self, *controllers):", "body": "for controller in controllers:if isinstance(controller, TrainingController):controller.bind(self)self._iter_controllers.append(controller)", "docstring": "Add iteration callbacks function (receives an argument of the trainer).\n:param controllers: can be a `TrainingController` or a function.\n:type funcs: list of TrainingContoller", "id": "f6142:c0:m11"} {"signature": "def current_epoch(self):", "body": "return self._epoch", "docstring": "Get current epoch.", "id": "f6142:c0:m23"} {"signature": "def exit(self):", "body": "self._ended = True", "docstring": "End the training.", "id": "f6142:c0:m10"} {"signature": "def __init__(self, network, config=None):", "body": "super(CustomizeTrainer, self).__init__(network, config)", "docstring": "Basic neural network trainer.\n:type network: deepy.NeuralNetwork\n:type config: deepy.conf.TrainerConfig\n:return:", "id": "f6144:c0:m0"} {"signature": "def get_gradients(self, params):", "body": "return T.grad(self.cost, params)", "docstring": "Get gradients from given parameters.", "id": "f6146:c0:m4"} {"signature": "def learning_function(self):", "body": "network_updates = list(self.network.updates) + list(self.network.training_updates)learning_updates = list(self._learning_updates())update_list = network_updates + learning_updateslogging.info(\"\" % \"\".join(map(str, [x[] for x in network_updates])))logging.info(\"\" % \"\".join(map(str, [x[] for x in learning_updates])))variables = self.network.input_variables + self.network.target_variablesgivens = Nonereturn theano.function(variables,map(lambda v: theano.Out(v, borrow=True), self.training_variables),updates=update_list, allow_input_downcast=True,mode=self.config.get(\"\", None),givens=givens)", "docstring": "Get the learning function.\n:param func:\n:return:", "id": "f6146:c0:m6"} {"signature": "def invoke(self):", "body": "self._counter += if self._counter % self._freq == :cnt = sum_map = defaultdict(float)for x in self._trainer.get_data(self._data_split):val_map = self.run(x)if not isinstance(val_map, dict):raise Exception(\"\")for k, val in val_map.items():sum_map[k] += valcnt += for k in sum_map:sum_map[k] /= cntnew_best = self.compare(sum_map)self._trainer.report(sum_map, self._data_split, new_best=new_best)if new_best:self._trainer.save_checkpoint(self._save_path)", "docstring": "This function will be called after each iteration.", "id": "f6147:c1:m5"} {"signature": "def compare(self, cost_map):", "body": "cri_val = cost_map[self._criteria]if self._best_criteria is None:self._best_criteria = cri_valreturn Trueelse:if self._smaller_is_better and cri_val < self._best_criteria:self._best_criteria = cri_valreturn Trueelif not self._smaller_is_better and cri_val > self._best_criteria:self._best_criteria = cri_valreturn Trueelse:return False", "docstring": "Compare to previous records and return whether the given cost is a new best.\n:return: True if the given cost is a new best", "id": "f6147:c1:m1"} {"signature": "def compute_context_vector(self, prev_state, inputs, precomputed_values=None, mask=None):", "body": "precomputed_values = precomputed_values if precomputed_values else self.precompute(inputs)align_weights = 
self.compute_alignments(prev_state, precomputed_values, mask)context_vector = T.sum(align_weights[:, :, None] * inputs, axis=)return context_vector", "docstring": "Compute the context vector with soft attention.", "id": "f6148:c0:m4"} {"signature": "def register_monitors(self, *monitors):", "body": "for key, node in monitors:if key not in self._registered_monitors:node *= self.training_monitors.append((key, node))self.testing_monitors.append((key, node))self._registered_monitors.add(key)", "docstring": "Register monitors they should be tuple of name and Theano variable.", "id": "f6164:c0:m12"} {"signature": "def register_training_callbacks(self, *callbacks):", "body": "self.training_callbacks.extend(callbacks)", "docstring": "Register callback for each iteration in the training.", "id": "f6164:c0:m15"} {"signature": "def register_external_inputs(self, *variables):", "body": "self.external_inputs.extend(variables)", "docstring": "Register external input variables.", "id": "f6164:c0:m13"} {"signature": "def register_parameters(self, *parameters):", "body": "for param in parameters:self.parameter_count += np.prod(param.get_value().shape)self.parameters.extend(parameters)", "docstring": "Register parameters.", "id": "f6164:c0:m8"} {"signature": "def __init__(self, *layers, **kwargs):", "body": "name = kwargs[''] if '' in kwargs else \"\".format(self._BLOCK_COUNT + )super(Block, self).__init__(name)self._BLOCK_COUNT += self.layers = list(layers)self.fixed = False", "docstring": "Create a new parameter block with some layers.\nYou can also specify a name through kwargs.", "id": "f6169:c0:m0"} {"signature": "def register_layer(self, layer):", "body": "if self.fixed:raise Exception(\"\")self.layers.append(layer)", "docstring": "Register one connected layer.\n:type layer: NeuralLayer", "id": "f6169:c0:m4"} {"signature": "def register(self, *layers):", "body": "for layer in layers:self.register_layer(layer)", "docstring": "Register many connected layers.\n:type layers: list of NeuralLayer", "id": "f6169:c0:m3"} {"signature": "@neural_computationdef get_initial_states(self, input_var, init_state=None):", "body": "initial_states = {}for state in self.state_names:if state != \"\" or not init_state:if self._input_type == '' and input_var.ndim == :init_state = T.alloc(np.cast[env.FLOATX](), self.hidden_size)else:init_state = T.alloc(np.cast[env.FLOATX](), input_var.shape[], self.hidden_size)initial_states[state] = init_statereturn initial_states", "docstring": ":type input_var: T.var\n:rtype: dict", "id": "f6174:c0:m6"} {"signature": "@neural_computationdef get_step_inputs(self, input_var, states=None, mask=None, additional_inputs=None):", "body": "step_inputs = {}if self._input_type == \"\":if not additional_inputs:additional_inputs = []if mask:step_inputs[''] = mask.dimshuffle(, )step_inputs.update(self.merge_inputs(input_var, additional_inputs=additional_inputs))else:if additional_inputs:step_inputs.update(self.merge_inputs(None, additional_inputs=additional_inputs))if states:for name in self.state_names:step_inputs[name] = states[name]return step_inputs", "docstring": ":type input_var: T.var\n:rtype: dict", "id": "f6174:c0:m7"} {"signature": "def switch_training(self, flag):", "body": "if self._is_training == flag: returnself._is_training = flagif flag:self._training_flag.set_value()else:self._training_flag.set_value()", "docstring": "Switch training mode.\n:param flag: switch on training mode when flag is True.", "id": "f6179:c0:m2"} {"signature": "@neural_computationdef iftrain(self, then_branch, 
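`compute_context_vector` above (f6148:c0:m4) forms a context vector as an alignment-weighted sum of the encoder states. A NumPy sketch of the same broadcast, with a plain softmax standing in for the layer's own alignment computation:

```python
import numpy as np

def context_vector(align_scores, inputs):
    """align_scores: (batch, time); inputs: (batch, time, dim) -> (batch, dim)."""
    w = np.exp(align_scores - align_scores.max(axis=1, keepdims=True))
    w /= w.sum(axis=1, keepdims=True)            # softmax over the time axis
    return (w[:, :, None] * inputs).sum(axis=1)  # same broadcast as align_weights[:, :, None] * inputs
```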
else_branch):", "body": "return ifelse(self._training_flag, then_branch, else_branch, name=\"\")", "docstring": "Execute `then_branch` when training.", "id": "f6179:c0:m1"} {"signature": "def __init__(self, seed=DEFAULT_SEED):", "body": "if seed != self.DEFAULT_SEED:self._seed = seedelif '' in os.environ:self._seed = int(os.environ[''])else:self._seed = self.DEFAULT_SEEDif self._seed != self.DEFAULT_SEED:logging.info(\"\" % self._seed)self._numpy_rand = np.random.RandomState(seed=self._seed)self._theano_rand = RandomStreams(seed=self._seed)self._shared_rand = SharedRandomStreams(seed=self._seed)self._default_initializer = None", "docstring": "Initialize seed and global random variables.", "id": "f6183:c0:m0"} {"signature": "def _scan_step(self, vars):", "body": "from neural_var import NeuralVariableif not self._loop_vars:raise Exception(\"\")replace_map = {}for k, var in vars.items():if var is not None:replace_map[self._dummy_nodes[k].tensor] = var.tensoroutputs = {}for k in self._outputs:if k not in self._loop_vars:raise Exception(\"\".format(k))output_node = theano.clone(self._loop_vars[k].tensor, replace_map)outputs[k] = NeuralVariable(output_node, self._loop_vars[k].dim())return outputs", "docstring": "Internal scan with dummy input variables.", "id": "f6184:c1:m4"} {"signature": "@neural_computationdef disconnect(self, x):", "body": "return disconnected_grad(x)", "docstring": "Disconnect a variable from backpropagation.", "id": "f6186:c0:m10"} {"signature": "def end(self):", "body": "self.end_time = time.time()", "docstring": "Stop the timer.", "id": "f6189:c0:m1"} {"signature": "def sample(self, shape):", "body": "raise NotImplementedError", "docstring": "Sample parameters with given shape.", "id": "f6190:c0:m1"} {"signature": "def __init__(self, uniform=False, seed=None):", "body": "super(KaimingHeInitializer, self).__init__(seed)self.uniform = uniform", "docstring": "Parameters:\n uniform - uniform distribution, default Gaussian\n seed - random seed", "id": "f6190:c5:m0"} {"signature": "@staticmethoddef dump(iterable_to_pickle, file_obj):", "body": "for elt in iterable_to_pickle:StreamPickler.dump_one(elt, file_obj)", "docstring": "dump contents of an iterable iterable_to_pickle to file_obj, a file\nopened in write mode", "id": "f6194:c0:m0"} {"signature": "@staticmethoddef load(file_obj):", "body": "cur_elt = []for line in file_obj:cur_elt.append(line)if line == '':pickled_elt_str = ''.join(cur_elt)cur_elt = []try:elt = loads(pickled_elt_str)except ValueError:continueyield elt", "docstring": "load contents from file_obj, returning a generator that yields one\nelement at a time", "id": "f6194:c0:m2"} {"signature": "@neural_computationdef activate(var, method):", "body": "from activations import get_activationreturn get_activation(method)(var)", "docstring": "An activation function.\n:param var: input var\n:param method: type of activation, such as `relu`,`tanh`,`sigmoid`", "id": "f6200:m2"} {"signature": "def handle_control(self, req, worker_id, req_info):", "body": "if self.start_time is None: self.start_time = time.time()response = \"\"if req == '':if self.num_train_batches == :response = \"\"elif self._done:response = \"\"self.worker_is_done(worker_id)elif self._evaluating:response = ''elif not self.batch_pool:if self._train_costs:with self._lock:sys.stdout.write(\"\")sys.stdout.flush()mean_costs = []for i in range(len(self._training_names)):mean_costs.append(np.mean([c[i] for c in 
self._train_costs]))self.log(\"\".format(self.epoch,self.get_monitor_string(zip(self._training_names, mean_costs))))response = {'': None, '': self._best_valid_cost}self._evaluating = Trueelse:if worker_id not in self.prepared_worker_pool:response = {\"\": self.feed_hyperparams()}self.prepared_worker_pool.add(worker_id)elif self._iters_from_last_valid >= self._valid_freq:response = {'': None, '': self._best_valid_cost}self._iters_from_last_valid = else:response = {\"\": self.feed_batches()}elif '' in req:with self._lock:self._evaluating = Falsesys.stdout.write(\"\")sys.stdout.flush()if '' in req and req['']:self.log(\"\".format(self.epoch,self.get_monitor_string(req['']),worker_id))if '' in req and req['']:valid_J = req[''][][]if valid_J < self._best_valid_cost:self._best_valid_cost = valid_Jstar_str = \"\"else:star_str = \"\"self.log(\"\".format(self.epoch,self.get_monitor_string(req['']),star_str,worker_id))continue_training = self.prepare_epoch()self._epoch_start_time = time.time()if not continue_training:self._done = Trueself.log(\"\".format(time.time() - self.start_time))response = \"\"elif '' in req:with self._lock:sys.stdout.write(\"\")sys.stdout.flush()if '' in req:valid_J = req[''][][]if valid_J < self._best_valid_cost:self._best_valid_cost = valid_Jstar_str = \"\"else:star_str = \"\"self.log(\"\".format(self.get_monitor_string(req['']),star_str,worker_id))elif '' in req:costs = req['']self._train_costs.append(costs)sys.stdout.write(\"\" % (self._current_iter * / self.num_train_batches,costs[], float(len(self._train_costs) * self.sync_freq) / (time.time() - self._epoch_start_time)))sys.stdout.flush()elif '' in req:self.num_train_batches = req['']elif '' in req:response = self._easgd_alphaelif '' in req:response = {\"\": self.feed_hyperparams()}elif '' in req:with self._lock:sys.stdout.write(\"\")sys.stdout.flush()self.log(\"\".format(worker_id))if self.epoch == :schedule_params = req['']sch_str = \"\".join(\"\".format(a, b) for (a, b) in schedule_params.items())self.log(\"\".format(sch_str))for key, val in schedule_params.items():if not val: continueif key == '':self._lr = valelif key == '':self.epoch_start_halving = valelif key == '':self._halving_freq = valelif key == '':self.end_at = valelif key == '':self.sync_freq = valelif key == '':self._valid_freq = valelif '' in req:self._training_names = req['']self._evaluation_names = req['']return response", "docstring": "Handles a control_request received from a worker.\nReturns:\n string or dict: response\n\n 'stop' - the worker should quit\n 'wait' - wait for 1 second\n 'eval' - evaluate on valid and test set to start a new epoch\n 'sync_hyperparams' - set learning rate\n 'valid' - evaluate on valid and test set, then save the params\n 'train' - train next batches", "id": "f6207:c0:m6"} {"signature": "def __init__(self, logger=None):", "body": "object.__setattr__(self, \"\", {})object.__setattr__(self, \"\", set())object.__setattr__(self, \"\", set())object.__setattr__(self, \"\", logger)", "docstring": "Create a general config", "id": "f6210:c0:m0"} {"signature": "def prepare(self):", "body": "self.output_dim = self.encoder = Chain(self.input_dim).stack(Dense(self.internal_layer_size, ''))self.decoder = Chain(self.internal_layer_size).stack(Dense(self.input_dim))self.classifier = Chain(self.internal_layer_size).stack(Dense(, ''),Dense(self.output_dim),Softmax())self.register_inner_layers(self.encoder, self.decoder, self.classifier)self.target_input = T.ivector('')self.register_external_inputs(self.target_input)", "docstring": "All 
code that creates parameters should be put into the 'setup' function.", "id": "f6226:c0:m1"} {"signature": "def _refined_glimpse_sensor(self, x_t, l_p):", "body": "l_p = l_p * + - l_p = T.cast(T.round(l_p), \"\")l_p = l_p * (l_p >= )l_p = l_p * (l_p < ) + (l_p >= ) * glimpse_1 = x_t[l_p[]: l_p[] + ][:, l_p[]: l_p[] + ]return glimpse_1", "docstring": "Parameters:\n    x_t - 28x28 image\n    l_p - 2x1 focus vector\nReturns:\n    7*14 matrix", "id": "f6236:c0:m3"} {"signature": "def _location_network(self, h_t):", "body": "return T.dot(h_t, self.W_l)", "docstring": "Parameters:\n    h_t - 256x1 vector\nReturns:\n    2x1 focus vector", "id": "f6236:c0:m6"} {"signature": "def _glimpse_network(self, x_t, l_p):", "body": "sensor_output = self._refined_glimpse_sensor(x_t, l_p)sensor_output = T.flatten(sensor_output)h_g = self._relu(T.dot(sensor_output, self.W_g0))h_l = self._relu(T.dot(l_p, self.W_g1))g = self._relu(T.dot(h_g, self.W_g2_hg) + T.dot(h_l, self.W_g2_hl))return g", "docstring": "Parameters:\n    x_t - 28x28 image\n    l_p - 2x1 focus vector\nReturns:\n    4x12 matrix", "id": "f6242:c0:m5"} {"signature": "@classmethoddef from_mapping(cls: Type[\"\"], mapping: Optional[Mapping[str, Any]] = None, **kwargs: Any) -> \"\":", "body": "mappings: Dict[str, Any] = {}if mapping is not None:mappings.update(mapping)mappings.update(kwargs)config = cls()for key, value in mappings.items():try:setattr(config, key, value)except AttributeError:passreturn config", "docstring": "Create a configuration from a mapping.\n\n    This allows either a mapping to be directly passed or as\n    keyword arguments, for example,\n\n    .. code-block:: python\n\n        config = {'keep_alive_timeout': 10}\n        Config.from_mapping(config)\n        Config.from_mapping(keep_alive_timeout=10)\n\n    Arguments:\n        mapping: Optionally a mapping object.\n        kwargs: Optionally a collection of keyword arguments to\n            form a mapping.", "id": "f6274:c1:m13"} {"signature": "async def serve(app: ASGIFramework,config: Config,*,task_status: trio._core._run._TaskStatus = trio.TASK_STATUS_IGNORED,) -> None:", "body": "if config.debug:warnings.warn(\"\", Warning)if config.workers != :warnings.warn(\"\", Warning)if config.worker_class != \"\":warnings.warn(\"\", Warning)await worker_serve(app, config, task_status=task_status)", "docstring": "Serve an ASGI framework app given the config.\n\n    This allows for a programmatic way to serve an ASGI framework; it\n    can be used via,\n\n    .. 
code-block:: python\n\n trio.run(partial(serve, app, config))\n\n It is assumed that the event-loop is configured before calling\n this function, therefore configuration values that relate to loop\n setup or process setup are ignored.", "id": "f6279:m0"} {"signature": "async def asgi_send(self, message: dict) -> None:", "body": "if message[\"\"] == \"\" and self.state == ASGIHTTPState.REQUEST:self.response = messageelif message[\"\"] == \"\" and self.state in {ASGIHTTPState.REQUEST,ASGIHTTPState.RESPONSE,}:if self.state == ASGIHTTPState.REQUEST:headers = build_and_validate_headers(self.response[\"\"])headers.extend(self.response_headers())await self.asend(h11.Response(status_code=int(self.response[\"\"]), headers=headers))self.state = ASGIHTTPState.RESPONSEif (not suppress_body(self.scope[\"\"], int(self.response[\"\"]))and message.get(\"\", b\"\") != b\"\"):await self.asend(h11.Data(data=bytes(message[\"\"])))if not message.get(\"\", False):if self.state != ASGIHTTPState.CLOSED:await self.asend(h11.EndOfMessage())await self.asgi_put({\"\": \"\"})self.state = ASGIHTTPState.CLOSEDelse:raise UnexpectedMessage(self.state, message[\"\"])", "docstring": "Called by the ASGI instance to send a message.", "id": "f6293:c0:m9"} {"signature": "async def asgi_receive(self) -> dict:", "body": "pass", "docstring": "Called by the ASGI instance to receive a message.", "id": "f6297:c6:m1"} {"signature": "async def asgi_put(self, message: dict) -> None:", "body": "pass", "docstring": "Called by the ASGI server to put a message to the ASGI instance.\n\n See asgi_receive as the get to this put.", "id": "f6297:c6:m0"} {"signature": "@classmethoddef validate_args(cls, tag_name, *args, **kwargs):", "body": "if cls.min_args is not None and len(args) < cls.min_args:if cls.min_args == :raise TemplateSyntaxError(\"\".format(tag_name, cls.min_args))else:raise TemplateSyntaxError(\"\".format(tag_name, cls.min_args))if cls.max_args is not None and len(args) > cls.max_args:if cls.max_args == :if cls.allowed_kwargs:raise TemplateSyntaxError(\"\".format(tag_name, cls.allowed_kwargs[]))else:raise TemplateSyntaxError(\"\".format(tag_name))elif cls.max_args == :raise TemplateSyntaxError(\"\".format(tag_name, cls.max_args))else:raise TemplateSyntaxError(\"\".format(tag_name, cls.max_args))", "docstring": "Validate the syntax of the template tag.", "id": "f6336:c0:m6"} {"signature": "def get_request(self, context):", "body": "if '' not in context:raise ImproperlyConfigured(\"\"\"\".format(self.tag_name))return context['']", "docstring": "Fetch the request from the context.\n\nThis enforces the use of a RequestProcessor, e.g.\n::\n\n render_to_response(\"page.html\", context, context_instance=RequestContext(request))", "id": "f6336:c0:m7"} {"signature": "def get_template_name(self, *tag_args, **tag_kwargs):", "body": "return tag_kwargs.get('', self.template_name)", "docstring": "Get the template name, by default using the :attr:`template_name` attribute.", "id": "f6336:c1:m1"} {"signature": "def render_tag(self, context, *tag_args, **tag_kwargs):", "body": "if self.as_var:return BaseAssignmentNode.render_tag(self, context, *tag_args, **tag_kwargs)else:return BaseInclusionNode.render_tag(self, context, *tag_args, **tag_kwargs)", "docstring": "Rendering of the tag. 
It either assigns the value as a variable, or renders it.", "id": "f6336:c4:m1"} {"signature": "@propertydef cpu_15min_load(self):", "body": "if self._data is not None:return self._data[\"\"][\"\"]", "docstring": "Average CPU load past 15 minutes", "id": "f6343:c1:m8"} {"signature": "def _get_network(self, network_id):", "body": "if self._data is not None:for network in self._data[\"\"]:if network[\"\"] == network_id:return network", "docstring": "Function to get specific network (eth0, total, etc)", "id": "f6343:c1:m16"} {"signature": "@propertydef cpu_other_load(self):", "body": "if self._data is not None:return self._data[\"\"][\"\"]", "docstring": "'Other' percentage of the total cpu load", "id": "f6343:c1:m2"} {"signature": "def memory_available_swap(self, human_readable=True):", "body": "if self._data is not None:return_data = int(self._data[\"\"][\"\"]) * if human_readable:return SynoFormatHelper.bytes_to_readable(return_data)else:return return_data", "docstring": "Total Available Memory Swap", "id": "f6343:c1:m11"} {"signature": "@staticmethoddef bytes_to_terrabytes(num):", "body": "var_tb = num / / / / return round(var_tb, )", "docstring": "Converts bytes to terabytes", "id": "f6343:c0:m3"} {"signature": "def memory_size(self, human_readable=True):", "body": "if self._data is not None:return_data = int(self._data[\"\"][\"\"]) * if human_readable:return SynoFormatHelper.bytes_to_readable(return_data)else:return return_data", "docstring": "Total Memory Size of Synology DSM", "id": "f6343:c1:m10"} {"signature": "@propertydef cpu_system_load(self):", "body": "if self._data is not None:return self._data[\"\"][\"\"]", "docstring": "'System' percentage of the total cpu load", "id": "f6343:c1:m4"} {"signature": "def memory_available_real(self, human_readable=True):", "body": "if self._data is not None:return_data = int(self._data[\"\"][\"\"]) * if human_readable:return SynoFormatHelper.bytes_to_readable(return_data)else:return return_data", "docstring": "Real available memory", "id": "f6343:c1:m13"} {"signature": "def _encode_credentials(self):", "body": "auth = {'': self.username,'': self.password,}return urlencode(auth)", "docstring": "Encode user credentials to support special characters.", "id": "f6343:c3:m2"} {"signature": "def disk_name(self, disk):", "body": "disk = self._get_disk(disk)if disk is not None:return disk[\"\"]", "docstring": "The name of this disk", "id": "f6343:c2:m13"} {"signature": "def _execute_get_url(self, request_url, append_sid=True):", "body": "self._debuglog(\"\" + request_url + \"\")if append_sid:self._debuglog(\"\" +self.access_token + \"\")request_url = \"\" % (request_url, self.access_token)try:resp = self._session.get(request_url)self._debuglog(\"\" + str(resp.status_code))if resp.status_code == :json_data = json.loads(resp.text)if json_data[\"\"]:self._debuglog(\"\")self._debuglog(str(json_data))return json_dataelse:if json_data[\"\"][\"\"] in {, , , }:self._debuglog(\"\" +str(json_data[\"\"][\"\"]))self._session_error = Trueelse:self._debuglog(\"\" + resp.text)else:return Noneexcept:return None", "docstring": "Function to execute and handle a GET request", "id": "f6343:c3:m5"} {"signature": "def update(self):", "body": "if self._utilisation is not None:api = \"\"url = \"\" % (self.base_url,api,self.access_token)self._utilisation.update(self._get_url(url))if self._storage is not None:api = \"\"url = \"\" % (self.base_url,api,self.access_token)self._storage.update(self._get_url(url))", "docstring": "Updates the various instanced modules", "id": 
"f6343:c3:m6"} {"signature": "@propertydef disks(self):", "body": "if self._data is not None:disks = []for disk in self._data[\"\"]:disks.append(disk[\"\"])return disks", "docstring": "Returns all available (internal) disks", "id": "f6343:c2:m11"} {"signature": "def volume_device_type(self, volume):", "body": "volume = self._get_volume(volume)if volume is not None:return volume[\"\"]", "docstring": "Returns the volume type (RAID1, RAID2, etc)", "id": "f6343:c2:m5"} {"signature": "@staticmethoddef bytes_to_readable(num):", "body": "if num < :return \"\"elif num < :return \"\"for unit in ['', '', '', '', '', '', '', '']:if abs(num) < :return \"\" % (num, unit)num /= return \"\" % (num, '')", "docstring": "Converts bytes to a human readable format", "id": "f6343:c0:m0"} {"signature": "def volume_size_total(self, volume, human_readable=True):", "body": "volume = self._get_volume(volume)if volume is not None:return_data = int(volume[\"\"][\"\"])if human_readable:return SynoFormatHelper.bytes_to_readable(return_data)else:return return_data", "docstring": "Total size of volume", "id": "f6343:c2:m6"} {"signature": "def volume_disk_temp_max(self, volume):", "body": "volume = self._get_volume(volume)if volume is not None:vol_disks = volume[\"\"]if vol_disks is not None:max_temp = for vol_disk in vol_disks:disk_temp = self.disk_temp(vol_disk)if disk_temp is not None and disk_temp > max_temp:max_temp = disk_tempreturn max_temp", "docstring": "Maximum temperature of all disks making up the volume", "id": "f6343:c2:m10"} {"signature": "@propertydef storage(self):", "body": "if self._storage is None:api = \"\"url = \"\" % (self.base_url,api)self._storage = SynoStorage(self._get_url(url))return self._storage", "docstring": "Getter for various Storage variables", "id": "f6343:c3:m8"} {"signature": "def disk_device(self, disk):", "body": "disk = self._get_disk(disk)if disk is not None:return disk[\"\"]", "docstring": "The mount point of this disk", "id": "f6343:c2:m14"} {"signature": "def to_python_source(classes, indent=DEFAULT_INDENT):", "body": "return header_source() + \"\" + classes_source(classes, indent)", "docstring": "Convert a set of pyschemas to executable python source code\n\n Currently supports all built-in types for basic usage.\n\n Notably not supported:\n * Maintaining class hierarchy\n * Methods, properties and non-field attributes\n * SELF-references", "id": "f6344:m0"} {"signature": "def header_source():", "body": "return (\"\"\"\"\"\")", "docstring": "Get the required header for generated source", "id": "f6344:m4"} {"signature": "def repr_vars(self):", "body": "d = OrderedDict()d[\"\"] = repr(self.nullable)d[\"\"] = repr(self.default)if self.description is not None:d[\"\"] = repr(self.description)return d", "docstring": "Return a dictionary the field definition\n\n Should contain all fields that are required for the definition of this field in a pyschema class", "id": "f6353:c5:m1"} {"signature": "def to_json_compatible(record):", "body": "d = {}for fname, f in record._fields.iteritems():val = getattr(record, fname)if val is not None:d[fname] = f.dump(val)return d", "docstring": "Dump record in json-encodable object format", "id": "f6353:m4"} {"signature": "def from_json_compatible(schema, dct):", "body": "kwargs = {}for key in dct:field_type = schema._fields.get(key)if field_type is None:raise ParseError(\"\" % (schema.__name__, key))kwargs[key] = field_type.load(dct[key])return schema(**kwargs)", "docstring": "Load from json-encodable", "id": "f6353:m5"} {"signature": "def 
serialize_validate(self, valid_record):", "body": "schema = jsonschema.get_root_schema_dict(valid_record)record_json = jsonschema.dumps(valid_record)record_dict = json.loads(record_json)validate(record_dict, schema)", "docstring": "Serialize and validate a record\n\n The record is dumped to JSON and then loaded into a Python dictionary,\n which is checked against the known schema", "id": "f6358:c5:m0"} {"signature": "def get_root_schema_dict(record):", "body": "state = SchemaGeneratorState()schema = get_schema_dict(record, state)del state.record_schemas[record._schema_name]if state.record_schemas:schema[''] = dict()for name, sub_schema in state.record_schemas.iteritems():schema[''][name] = sub_schemareturn schema", "docstring": "Return a root jsonschema for a given record\n\n A root schema includes the $schema attribute and all sub-record\n schemas and definitions.", "id": "f6373:m1"} {"signature": "def mr_writer(job, outputs, output_stream,stderr=sys.stderr, dumps=core.dumps):", "body": "for output in outputs:try:print(dumps(output), file=output_stream)except core.ParseError as e:print(e, file=stderr)raise", "docstring": "Writes a stream of json serialised pyschema Records to a file object\n\n Can be used as job.writer in luigi.hadoop.JobTask", "id": "f6378:m1"} {"signature": "def parse_schema_string(schema_string):", "body": "if isinstance(schema_string, str):schema_string = schema_string.decode(\"\")schema_struct = json.loads(schema_string)return AvroSchemaParser().parse_schema_struct(schema_struct)", "docstring": "Load and return a PySchema class from an avsc string", "id": "f6379:m0"} {"signature": "def select_renderer(request: web.Request, renderers: OrderedDict, force=True):", "body": "header = request.headers.get('', '')best_match = mimeparse.best_match(renderers.keys(), header)if not best_match or best_match not in renderers:if force:return tuple(renderers.items())[]else:raise web.HTTPNotAcceptablereturn best_match, renderers[best_match]", "docstring": "Given a request, a list of renderers, and the ``force`` configuration\noption, return a two-tuple of:\n(media type, render callable). Uses mimeparse to find the best media\ntype match from the ACCEPT header.", "id": "f6387:m0"} {"signature": "@contextmanagerdef add_resource_context(app: web.Application, module=None,url_prefix: str=None, name_prefix: str=None, make_resource=lambda cls: cls()):", "body": "assert isinstance(app.router, ResourceRouter), ''if isinstance(module, (str, bytes)):module = importlib.import_module(module)def get_base_name(resource, method_name, names):return names.get(method_name,app.router.get_default_handler_name(resource, method_name))default_make_resource = make_resourcedef add_route(path: str, resource, names: Mapping=None, make_resource=None):make_resource = make_resource or default_make_resourcenames = names or {}if isinstance(resource, (str, bytes)):if not module:raise ValueError('')resource_cls = getattr(module, resource)resource = make_resource(resource_cls)path = make_path(path, url_prefix)if name_prefix:supported_method_names = get_supported_method_names(resource)names = {method_name: ''.join((name_prefix, get_base_name(resource, method_name, names=names)))for method_name in supported_method_names}return app.router.add_resource_object(path, resource, names=names)yield add_route", "docstring": "Context manager which yields a function for adding multiple resources from a given module\n to an app using `ResourceRouter `.\n\n Example:\n\n .. 
code-block:: python\n\n # myapp/articles/views.py\n class ArticleList:\n async def get(self, request):\n return web.Response(b'article list...')\n\n class ArticleDetail:\n async def get(self, request):\n return web.Response(b'article detail...')\n\n .. code-block:: python\n\n # myapp/app.py\n from myapp.articles import views\n\n with add_resource_context(app, url_prefix='/api/') as route:\n route('/articles/', views.ArticleList())\n route('/articles/{pk}', views.ArticleDetail())\n\n app.router['ArticleList:get'].url() # /api/articles/\n app.router['ArticleDetail:get'].url(parts={'pk': 42}) # /api/articles/42\n\n If you prefer, you can also pass module and class names as strings. ::\n\n with add_resource_context(app, module='myapp.articles.views',\n url_prefix='/api/') as route:\n route('/articles/', 'ArticleList')\n route('/articles/{pk}', 'ArticleDetail')\n\n .. note::\n If passing class names, the resource classes will be instantiated with no\n arguments. You can change this behavior by overriding ``make_resource``.\n\n .. code-block:: python\n\n # myapp/authors/views.py\n class AuthorList:\n def __init__(self, db):\n self.db = db\n\n async def get(self, request):\n # Fetch authors from self.db...\n\n .. code-block:: python\n\n # myapp/app.py\n from myapp.database import db\n\n with add_resource_context(app, module='myapp.authors.views',\n url_prefix='/api/',\n make_resource=lambda cls: cls(db=db)) as route:\n route('/authors/', 'AuthorList')\n\n :param app: Application to add routes to.\n :param resource: Import path to module (str) or module object\n which contains the resource classes.\n :param url_prefix: Prefix to prepend to all route paths.\n :param name_prefix: Prefix to prepend to all route names.\n :param make_resource: Function which receives a resource class and returns\n a resource instance.", "id": "f6388:m3"} {"signature": "@register.tagdef systemjs_import(parser, token):", "body": "return SystemImportNode.handle_token(parser, token)", "docstring": "Import a Javascript module via SystemJS, bundling the app.\n\nSyntax::\n\n {% systemjs_import 'path/to/file' %}\n\nExample::\n\n {% systemjs_import 'mydjangoapp/js/myapp' %}\n\nWhich would be rendered like::\n\n \n\nwhere /static/CACHE can be configured through settings.\n\nIn DEBUG mode, the result would be\n\n ", "id": "f6412:m0"} {"signature": "def render(self, context):", "body": "module_path = self.path.resolve(context)if not settings.SYSTEMJS_ENABLED:if settings.SYSTEMJS_DEFAULT_JS_EXTENSIONS:name, ext = posixpath.splitext(module_path)if not ext:module_path = ''.format(module_path)if settings.SYSTEMJS_SERVER_URL:tpl = \"\"\"\"\"\"else:tpl = \"\"\"\"\"\"return tpl.format(app=module_path, url=settings.SYSTEMJS_SERVER_URL)rel_path = System.get_bundle_path(module_path)url = staticfiles_storage.url(rel_path)tag_attrs = {'': ''}for key, value in self.tag_attrs.items():if not isinstance(value, bool):value = value.resolve(context)tag_attrs[key] = valuereturn \"\"\"\"\"\".format(url=url, attrs=flatatt(tag_attrs))", "docstring": "Build the filepath by appending the extension.", "id": "f6412:c0:m1"} {"signature": "def load_systemjs_manifest(self):", "body": "_manifest_name = self.manifest_nameself.manifest_name = self.systemjs_manifest_namebundle_files = self.load_manifest()self.manifest_name = _manifest_namefor file, hashed_file in bundle_files.copy().items():if not self.exists(file) or not self.exists(hashed_file):del bundle_files[file]return bundle_files", "docstring": "Load the existing systemjs manifest and remove any entries 
that no longer\nexist on the storage.", "id": "f6415:c0:m1"} {"signature": "def __init__(self, system, app, **options):", "body": "self.system = systemself.app = appoptions.setdefault('', settings.SYSTEMJS_JSPM_EXECUTABLE)self.opts = optionsbundle_cmd = self.get_bundle_sfx_cmd() if self.opts.get('') else ''self.command = '' + bundle_cmd + ''self.stdout = self.stdin = self.stderr = subprocess.PIPE", "docstring": "Initialize a SystemBundle object.\n\n:param system: a System instance that holds the non-bundle specific\nmeta information (such as jspm version, configuration)\n\n:param app: string, the name of the JS package to bundle. This may be\nmissing the '.js' extension.\n\n:param options: dict containing the bundle-specific options. Possible\noptions:\n `jspm`: `jspm` executable (if it's not on $PATH, for example)\n `log`: logging mode for jspm, can be ok|warn|err. Only available\n for jspm >= 0.16.3\n `minify`: boolean, whether go generate minified bundles or not\n `sfx`: boolean, generate a self-executing bundle or not\n `skip-source-maps: boolean, whether to generate source maps or not", "id": "f6416:c2:m0"} {"signature": "def get_git_changeset():", "body": "repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))git_log = subprocess.Popen('',stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True, cwd=repo_dir, universal_newlines=True)timestamp = git_log.communicate()[]try:timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))except ValueError:return Nonereturn timestamp.strftime('')", "docstring": "Returns a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.", "id": "f6418:m3"} {"signature": "def merge_(self, merge_dct):", "body": "for k, v in merge_dct.items():if (k in self and isinstance(self[k], dict) and isinstance(merge_dct[k], collections.Mapping)):self[k].merge_(dicto(merge_dct[k]))else:self[k] = merge_dct[k]return self", "docstring": "Recursive dict merge. Inspired by :meth:``dict.update()``, instead of\n updating only top-level keys, dict_merge recurses down into dicts nested\n to an arbitrary depth, updating keys. The ``merge_dct`` is merged into\n ``self``.\n :param self: dict onto which the merge is executed\n :param merge_dct: self merged into self\n :return: None", "id": "f6434:c0:m4"} {"signature": "def merge(dicto, other):", "body": "if not isinstance(dicto, Dicto):dicto = Dicto(dicto)if not isinstance(other, Dicto):other = Dicto(other)for k, v in other.__dict__.items():if k in dicto and isinstance(dicto[k], Dicto) and isinstance(other[k], Dicto):dicto[k] = merge(dicto[k], other[k])else:dicto[k] = other[k]return dicto", "docstring": "Recursive dict merge. Inspired by :meth:``dict.update()``, instead of\n updating only top-level keys, dict_merge recurses down into dicts nested\n to an arbitrary depth, updating keys. 
The ``other`` is merged into\n    ``dicto``.\n    :param dicto: dict onto which the merge is executed\n    :param other: dict that is going to be merged into dicto\n    :return: the merged dicto", "id": "f6436:m2"} {"signature": "def visualize(spect, frequencies, title=\"\"):", "body": "i = for freq, (index, row) in zip(frequencies[::-], enumerate(spect[::-, :])):plt.subplot(spect.shape[], , index + )if i == :plt.title(title)i += plt.ylabel(\"\".format(freq))plt.plot(row)plt.show()", "docstring": "Visualize the result of calling seg.filter_bank() for any number of filters", "id": "f6451:m0"} {"signature": "def generate_frames_as_segments(self, frame_duration_ms, zero_pad=True):", "body": "for frame in self.generate_frames(frame_duration_ms, zero_pad=zero_pad):seg = AudioSegment(pydub.AudioSegment(data=frame.bytes, sample_width=self.sample_width,frame_rate=self.frame_rate, channels=self.channels), self.name)yield seg, frame.timestamp", "docstring": "Does the same thing as `generate_frames`, but yields tuples of (AudioSegment, timestamp) instead of Frames.", "id": "f6454:c0:m23"} {"signature": "def _execute_sox_cmd(self, cmd, console_output=False):", "body": "on_windows = platform.system().lower() == \"\"def _get_random_tmp_file():if on_windows:rand_string = \"\".join(random.choice(string.ascii_uppercase + string.digits) for _ in range())tmp = self.name + \"\" + rand_stringWinTempFile = collections.namedtuple(\"\", \"\")tmp = WinTempFile(tmp)else:tmp = tempfile.NamedTemporaryFile()return tmptmp = _get_random_tmp_file()othertmp = _get_random_tmp_file()self.export(tmp.name, format=\"\")stdout = stderr = subprocess.PIPE if console_output else subprocess.DEVNULLcommand = cmd.format(inputfile=tmp.name, outputfile=othertmp.name)res = subprocess.call(command.split(''), stdout=stdout, stderr=stderr)assert res == , \"\"other = AudioSegment(pydub.AudioSegment.from_wav(othertmp.name), self.name)if on_windows:os.remove(tmp.name)os.remove(othertmp.name)else:tmp.close()othertmp.close()return other", "docstring": "Executes a Sox command in a platform-independent manner.\n\n`cmd` must be a format string that includes {inputfile} and {outputfile}.", "id": "f6454:c0:m19"} {"signature": "def filter_bank(self, lower_bound_hz=, upper_bound_hz=, nfilters=, mode=''):", "body": "data = self.to_numpy_array()if mode.lower() == '':frequencies = librosa.core.mel_frequencies(n_mels=nfilters, fmin=lower_bound_hz, fmax=upper_bound_hz)elif mode.lower() == '':frequencies = np.linspace(lower_bound_hz, upper_bound_hz, num=nfilters, endpoint=True)elif mode.lower() == '':start = np.log10(lower_bound_hz)stop = np.log10(upper_bound_hz)frequencies = np.logspace(start, stop, num=nfilters, endpoint=True, base=)else:raise ValueError(\"\".format(mode))rows = [filters.bandpass_filter(data, freq*, freq*, self.frame_rate) for freq in frequencies]rows = np.array(rows)spect = np.vstack(rows)return spect, frequencies", "docstring": "Returns a numpy array of shape (nfilters, nsamples), where each\nrow of data is the result of bandpass filtering the audiosegment\naround a particular frequency. The frequencies are\nspaced from `lower_bound_hz` to `upper_bound_hz` and are returned with\nthe np array. The particular spacing of the frequencies depends on `mode`,\nwhich can be either: 'linear', 'mel', or 'log'.\n\n.. note:: This method is an approximation of a gammatone filterbank\n          until I get around to writing an actual gammatone filterbank\n          function.\n\n.. 
code-block:: python\n\n # Example usage\n import audiosegment\n import matplotlib.pyplot as plt\n import numpy as np\n\n def visualize(spect, frequencies, title=\"\"):\n # Visualize the result of calling seg.filter_bank() for any number of filters\n i = 0\n for freq, (index, row) in zip(frequencies[::-1], enumerate(spect[::-1, :])):\n plt.subplot(spect.shape[0], 1, index + 1)\n if i == 0:\n plt.title(title)\n i += 1\n plt.ylabel(\"{0:.0f}\".format(freq))\n plt.plot(row)\n plt.show()\n\n seg = audiosegment.from_file(\"some_audio.wav\").resample(sample_rate_Hz=24000, sample_width=2, channels=1)\n spec, frequencies = seg.filter_bank(nfilters=5)\n visualize(spec, frequencies)\n\n.. image:: images/filter_bank.png\n\n:param lower_bound_hz: The lower bound of the frequencies to use in the bandpass filters.\n:param upper_bound_hz: The upper bound of the frequencies to use in the bandpass filters.\n:param nfilters: The number of filters to apply. This will determine which frequencies\n are used as well, as they are interpolated between\n `lower_bound_hz` and `upper_bound_hz` based on `mode`.\n:param mode: The way the frequencies are spaced. Options are: `linear`, in which case\n the frequencies are linearly interpolated between `lower_bound_hz` and\n `upper_bound_hz`, `mel`, in which case the mel frequencies are used,\n or `log`, in which case they are log-10 spaced.\n:returns: A numpy array of the form (nfilters, nsamples), where each row is the\n audiosegment, bandpass-filtered around a particular frequency,\n and the list of frequencies. I.e., returns (spec, freqs).", "id": "f6454:c0:m14"} {"signature": "def __getstate__(self):", "body": "return {'': self.name, '': self.seg}", "docstring": "Serializes into a dict for the pickle protocol.\n\n:returns: The dict to pickle.", "id": "f6454:c0:m28"} {"signature": "def reduce(self, others):", "body": "ret = AudioSegment(self.seg, self.name)selfdata = [self.seg._data]otherdata = [o.seg._data for o in others]ret.seg._data = b''.join(selfdata + otherdata)return ret", "docstring": "Reduces others into this one by concatenating all the others onto this one and\nreturning the result. 
Does not modify self, instead, makes a copy and returns that.\n\n:param others: The other AudioSegment objects to append to this one.\n:returns: The concatenated result.", "id": "f6454:c0:m26"} {"signature": "def __setstate__(self, d):", "body": "self.__dict__.update(d)", "docstring": "Deserializes from a dict for the pickle protocol.\n\n:param d: The dict to unpickle from.", "id": "f6454:c0:m29"} {"signature": "def generate_frames(self, frame_duration_ms, zero_pad=True):", "body": "Frame = collections.namedtuple(\"\", \"\")bytes_per_frame = int(self.frame_rate * (frame_duration_ms / ) * self.sample_width)offset = timestamp = frame_duration_s = (bytes_per_frame / self.frame_rate) / self.sample_widthwhile offset + bytes_per_frame < len(self.raw_data):yield Frame(self.raw_data[offset:offset + bytes_per_frame], timestamp, frame_duration_s)timestamp += frame_duration_soffset += bytes_per_frameif zero_pad:rest = self.raw_data[offset:]zeros = bytes(bytes_per_frame - len(rest))yield Frame(rest + zeros, timestamp, frame_duration_s)", "docstring": "Yields self's data in chunks of frame_duration_ms.\n\nThis function adapted from pywebrtc's example [https://github.com/wiseman/py-webrtcvad/blob/master/example.py].\n\n:param frame_duration_ms: The length of each frame in ms.\n:param zero_pad: Whether or not to zero pad the end of the AudioSegment object to get all\n the audio data out as frames. If not, there may be a part at the end\n of the Segment that is cut off (the part will be <= `frame_duration_ms` in length).\n:returns: A Frame object with properties 'bytes (the data)', 'timestamp (start time)', and 'duration'.", "id": "f6454:c0:m22"} {"signature": "def spectrogram(self, start_s=None, duration_s=None, start_sample=None, num_samples=None,window_length_s=None, window_length_samples=None, overlap=, window=('', )):", "body": "if start_s is not None and start_sample is not None:raise ValueError(\"\")if duration_s is not None and num_samples is not None:raise ValueError(\"\")if window_length_s is not None and window_length_samples is not None:raise ValueError(\"\")if window_length_s is None and window_length_samples is None:raise ValueError(\"\")if start_s is None and start_sample is None:start_sample = elif start_s is not None:start_sample = int(round(start_s * self.frame_rate))if duration_s is None and num_samples is None:num_samples = len(self.get_array_of_samples()) - int(start_sample)elif duration_s is not None:num_samples = int(round(duration_s * self.frame_rate))if window_length_s is not None:window_length_samples = int(round(window_length_s * self.frame_rate))if start_sample + num_samples > len(self.get_array_of_samples()):raise ValueError(\"\")arr = self.to_numpy_array()[start_sample:start_sample+num_samples]fs, ts, sxx = signal.spectrogram(arr, self.frame_rate, scaling='', nperseg=window_length_samples,noverlap=int(round(overlap * window_length_samples)),mode='', window=window)return fs, ts, sxx", "docstring": "Does a series of FFTs from `start_s` or `start_sample` for `duration_s` or `num_samples`.\nEffectively, transforms a slice of the AudioSegment into the frequency domain across different\ntime bins.\n\n.. 
code-block:: python\n\n    # Example for plotting a spectrogram using this function\n    import audiosegment\n    import matplotlib.pyplot as plt\n    import numpy as np\n\n    #...\n    seg = audiosegment.from_file(\"somebodytalking.wav\")\n    freqs, times, amplitudes = seg.spectrogram(window_length_s=0.03, overlap=0.5)\n    amplitudes = 10 * np.log10(amplitudes + 1e-9)\n\n    # Plot\n    plt.pcolormesh(times, freqs, amplitudes)\n    plt.xlabel(\"Time in Seconds\")\n    plt.ylabel(\"Frequency in Hz\")\n    plt.show()\n\n.. image:: images/spectrogram.png\n\n:param start_s: The start time. Starts at the beginning if neither this nor `start_sample` is specified.\n:param duration_s: The duration of the spectrogram in seconds. Goes to the end if neither this nor\n                   `num_samples` is specified.\n:param start_sample: The index of the first sample to use. Starts at the beginning if neither this nor\n                     `start_s` is specified.\n:param num_samples: The number of samples in the spectrogram. Goes to the end if neither this nor\n                    `duration_s` is specified.\n:param window_length_s: The length of each FFT in seconds. If the total number of samples in the spectrogram\n                        is not a multiple of the window length in samples, the last window will be zero-padded.\n:param window_length_samples: The length of each FFT in number of samples. If the total number of samples in the\n                              spectrogram is not a multiple of the window length in samples, the last window will\n                              be zero-padded.\n:param overlap: The fraction of each window to overlap.\n:param window: See Scipy's spectrogram-function_.\n               This parameter is passed as-is directly into the Scipy spectrogram function. Its documentation is reproduced here:\n               Desired window to use. If window is a string or tuple, it is passed to get_window to generate the window values,\n               which are DFT-even by default. See get_window for a list of windows and required parameters.\n               If window is array_like it will be used directly as the window and its length must be\n               `window_length_samples`.\n               Defaults to a Tukey window with shape parameter of 0.25.\n:returns: Three np.ndarrays: The frequency values in Hz (the y-axis in a spectrogram), the time values starting\n          at start time and then increasing by `duration_s` each step (the x-axis in a spectrogram), and\n          the dB of each time/frequency bin as a 2D array of shape [len(frequency values), len(duration)].\n:raises ValueError: If `start_s` and `start_sample` are both specified, if `duration_s` and `num_samples` are both\n                    specified, if the first window's duration plus start time lead to running off the end\n                    of the AudioSegment, or if `window_length_s` and `window_length_samples` are either\n                    both specified or if they are both not specified.\n\n.. 
_spectrogram-function: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html", "id": "f6454:c0:m31"} {"signature": "def _update_segmentation_mask(segmentation_mask, onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap):", "body": "onset_front_overlap, offset_front_overlap = _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap)onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id_most_overlap)msg = \"\".format(onset_front, offset_front, onset_front_overlap, offset_front_overlap)assert onset_front_overlap, msgassert offset_front_overlap, msgonset_front = onset_front_overlapoffset_front = offset_front_overlapflow_on, _slow_on = onset_front[]fhigh_on, _shigh_on = onset_front[-]flow_off, _slow_off = offset_front[]fhigh_off, _shigh_off = offset_front[-]flow = max(flow_on, flow_off)fhigh = min(fhigh_on, fhigh_off)for fidx, _freqchan in enumerate(segmentation_mask[flow:fhigh + , :], start=flow):assert fidx >= flow, \"\".format(fidx, flow)assert (fidx - flow) < len(onset_front), \"\".format(fidx, flow, len(onset_front), onset_front)assert (fidx - flow) < len(offset_front), \"\".format(fidx, flow, len(offset_front), offset_front)_, beg = onset_front[fidx - flow]_, end = offset_front[fidx - flow]if beg > end:end, beg = beg, endassert end >= begsegmentation_mask[fidx, beg:end + ] = onset_front_idonset_fronts[fidx, (beg + ):(end + )] = offset_fronts[fidx, (beg + ):(end + )] = nfreqs_used_in_onset_front = (fidx - flow) + indexes = np.arange(flow, fhigh + , , dtype=np.int64)onset_front_sample_idxs_across_freqs = np.array([s for _, s in onset_front])onset_front_sample_idxs_across_freqs_up_to_break = onset_front_sample_idxs_across_freqs[:nfreqs_used_in_onset_front]offset_front_sample_idxs_across_freqs = np.array([s for _, s in offset_front])offset_front_sample_idxs_across_freqs_up_to_break = offset_front_sample_idxs_across_freqs[:nfreqs_used_in_onset_front]offset_fronts[indexes[:nfreqs_used_in_onset_front], offset_front_sample_idxs_across_freqs_up_to_break] = onset_fronts[indexes[:nfreqs_used_in_onset_front], onset_front_sample_idxs_across_freqs_up_to_break] = whole_onset_front_matched = onset_front_id not in np.unique(onset_fronts)return whole_onset_front_matched", "docstring": "Returns an updated segmentation mask such that the input `segmentation_mask` has been updated by segmenting between\n`onset_front_id` and `offset_front_id`, as found in `onset_fronts` and `offset_fronts`, respectively.\n\nThis function also returns the onset_fronts and offset_fronts matrices, updated so that any fronts that are of\nless than 3 channels wide are removed.\n\nThis function also returns a boolean value indicating whether the onset channel went to completion.\n\nSpecifically, segments by doing the following:\n\n- Going across frequencies in the onset_front,\n- add the segment mask ID (the onset front ID) to all samples between the onset_front and the offset_front,\n if the offset_front is in that frequency.\n\nPossible scenarios:\n\nFronts line up completely:\n\n::\n\n | | S S S\n | | => S S S\n | | S S S\n | | S S S\n\nOnset front starts before offset front:\n\n::\n\n | |\n | | S S S\n | | => S S S\n | | S S S\n\nOnset front ends after offset front:\n\n::\n\n | | S S S\n | | => S S S\n | | S S S\n | |\n\nOnset front starts before and ends after offset front:\n\n::\n\n | |\n | | => S S S\n | | S S S\n | |\n\nThe above three 
options in reverse:\n\n::\n\n | |S S| |\n |S S| |S S| |S S|\n |S S| |S S| |S S|\n |S S| | |\n\nThere is one last scenario:\n\n::\n\n | |\n \\ /\n \\ /\n / \\\n | |\n\nWhere the offset and onset fronts cross one another. If this happens, we simply\nreverse the indices and accept:\n\n::\n\n |sss|\n \\sss/\n \\s/\n /s\\\n |sss|\n\nThe other option would be to destroy the offset front from the crossover point on, and\nthen search for a new offset front for the rest of the onset front.", "id": "f6457:m17"} {"signature": "def _form_onset_offset_fronts(ons_or_offs, sample_rate_hz, threshold_ms=):", "body": "threshold_s = threshold_ms / threshold_samples = sample_rate_hz * threshold_sons_or_offs = np.copy(ons_or_offs)claimed = []this_id = for frequency_index, row in enumerate(ons_or_offs[:, :]):ones = np.reshape(np.where(row == ), (-,))for top_level_frequency_one_index in ones:claimed.append((frequency_index, top_level_frequency_one_index))found_a_front = Falsefor other_frequency_index, other_row in enumerate(ons_or_offs[frequency_index + :, :], start=frequency_index + ):upper_limit_index = top_level_frequency_one_index + threshold_sampleslower_limit_index = top_level_frequency_one_index - threshold_samplesother_ones = np.reshape(np.where(other_row == ), (-,)) tmp = np.reshape(np.where((other_ones >= lower_limit_index) & (other_ones <= upper_limit_index)), (-,))other_ones = other_ones[tmp] if len(other_ones) > :unclaimed_idx = other_ones[] claimed.append((other_frequency_index, unclaimed_idx))elif len(claimed) < :ons_or_offs[frequency_index, top_level_frequency_one_index] = claimed = []break elif len(claimed) >= :found_a_front = Trueclaimed_as_indexes = tuple(np.array(claimed).T)ons_or_offs[claimed_as_indexes] = this_idthis_id += claimed = []break if len(claimed) >= :claimed_as_indexes = tuple(np.array(claimed).T)ons_or_offs[claimed_as_indexes] = this_idthis_id += claimed = []elif found_a_front:this_id += else:ons_or_offs[frequency_index, top_level_frequency_one_index] = claimed = []return ons_or_offs", "docstring": "Takes an array of onsets or offsets (shape = [nfrequencies, nsamples], where a 1 corresponds to an on/offset,\nand samples are 0 otherwise), and returns a new array of the same shape, where each 1 has been replaced by\neither a 0, if the on/offset has been discarded, or a non-zero positive integer, such that\neach front within the array has a unique ID - for example, all 2s in the array will be the front for on/offset\nfront 2, and all the 15s will be the front for on/offset front 15, etc.\n\nDue to implementation details, there will be no 1 IDs.", "id": "f6457:m8"} {"signature": "def _front_id_from_idx(front, index):", "body": "fidx, sidx = indexid = front[fidx, sidx]if id == :return -else:return id", "docstring": "Returns the front ID found in `front` at the given `index`.\n\n:param front: An onset or offset front array of shape [nfrequencies, nsamples]\n:index: A tuple of the form (frequency index, sample index)\n:returns: The ID of the front or -1 if not found in `front` and the item at `onsets_or_offsets[index]`\n is not a 1.", "id": "f6457:m18"} {"signature": "def _downsample_one_or_the_other(mask, mask_indexes, stft, stft_indexes):", "body": "assert len(mask.shape) == , \"\".format(len(mask.shape))assert len(stft.shape) == , \"\".format(len(stft.shape))if mask.shape[] > stft.shape[]:downsample_factor = mask.shape[] / stft.shape[]indexes = _get_downsampled_indexes(mask, downsample_factor)mask = mask[:, indexes]mask_indexes = np.array(indexes)elif mask.shape[] < 
stft.shape[]:downsample_factor = stft.shape[] / mask.shape[]indexes = _get_downsampled_indexes(stft, downsample_factor)stft = stft[:, indexes]stft_indexes = np.array(indexes)return mask, mask_indexes, stft, stft_indexes", "docstring": "Takes the given `mask` and `stft`, which must be matrices of shape `frequencies, times`\nand downsamples one of them into the other one's times, so that the time dimensions\nare equal. Leaves the frequency dimension untouched.", "id": "f6457:m33"} {"signature": "def _choose_front_id_from_candidates(candidate_offset_front_ids, offset_fronts, offsets_corresponding_to_onsets):", "body": "noverlaps = [] for offset_front_id in candidate_offset_front_ids:offset_front_f_idxs, offset_front_s_idxs = np.where(offset_fronts == offset_front_id)offset_front_idxs = [(f, i) for f, i in zip(offset_front_f_idxs, offset_front_s_idxs)]noverlap_this_id = len(set(offset_front_idxs).symmetric_difference(set(offsets_corresponding_to_onsets)))noverlaps.append((noverlap_this_id, offset_front_id))_overlapped, chosen_offset_front_id = max(noverlaps, key=lambda t: t[])return int(chosen_offset_front_id)", "docstring": "Returns a front ID which is the id of the offset front that contains the most overlap\nwith offsets that correspond to the given onset front ID.", "id": "f6457:m11"} {"signature": "def _compute_peaks_or_valleys_of_first_derivative(s, do_peaks=True):", "body": "gradient = np.nan_to_num(np.apply_along_axis(np.gradient, , s), copy=False)threshold = np.squeeze(np.nanmean(gradient, axis=) + np.nanstd(gradient, axis=))half_window = if do_peaks:indexes = [signal.argrelextrema(gradient[i, :], np.greater, order=half_window)[] for i in range(gradient.shape[])]else:indexes = [signal.argrelextrema(gradient[i, :], np.less, order=half_window)[] for i in range(gradient.shape[])]extrema = np.zeros(s.shape)for row_index, index_array in enumerate(indexes):for col_index in index_array:if do_peaks and (gradient[row_index, col_index] > threshold[row_index]):extrema[row_index, col_index] = elif not do_peaks:extrema[row_index, col_index] = return extrema, gradient", "docstring": "Takes a spectrogram and returns a 2D array of the form:\n\n0 0 0 1 0 0 1 0 0 0 1 <-- Frequency 0\n0 0 1 0 0 0 0 0 0 1 0 <-- Frequency 1\n0 0 0 0 0 0 1 0 1 0 0 <-- Frequency 2\n*** Time axis *******\n\nWhere a 1 means that the value in that time bin in the spectrogram corresponds to\na peak/valley in the first derivative.\n\nThis function is used as part of the ASA algorithm and is not meant to be used publicly.", "id": "f6457:m6"} {"signature": "def _get_corresponding_offsets(onset_fronts, onset_front_id, onsets, offsets):", "body": "corresponding_offsets = []for index in _get_front_idxs_from_id(onset_fronts, onset_front_id):offset_fidx, offset_sidx = _lookup_offset_by_onset_idx(index, onsets, offsets)corresponding_offsets.append((offset_fidx, offset_sidx))return corresponding_offsets", "docstring": "Gets the offsets that occur as close as possible to the onsets in the given onset-front.", "id": "f6457:m20"} {"signature": "def _correlate_onsets_and_offsets(onsets, offsets, gradients):", "body": "for freq_index, (ons, offs) in enumerate(zip(onsets[:, :], offsets[:, :])):indexes_of_all_ones = np.reshape(np.where(ons == ), (-,))last_idx = indexes_of_all_ones[]offs[:last_idx + ] = if len(indexes_of_all_ones > ):for next_idx in indexes_of_all_ones[:]:offset_choices = offs[last_idx:next_idx]offset_choice_indexes = np.where(offset_choices == )if not np.any(offset_choices):continueassert np.any(offset_choices), 
\"\".format(last_idx, next_idx)offset_choice_indexes = np.reshape(last_idx + offset_choice_indexes, (-,))assert np.all(offsets[freq_index, offset_choice_indexes])gradient_values = gradients[freq_index, offset_choice_indexes]index_of_largest_from_gradient_values = np.where(gradient_values == np.min(gradient_values))[]index_of_largest_offset_choice = offset_choice_indexes[index_of_largest_from_gradient_values]assert offsets[freq_index, index_of_largest_offset_choice] == offsets[freq_index, offset_choice_indexes] = offsets[freq_index, index_of_largest_offset_choice] = last_idx = next_idxelse:offsets[freq_index, :] = offsets[freq_index, -] = return offsets", "docstring": "Takes an array of onsets and an array of offsets, of the shape [nfrequencies, nsamples], where\neach item in these arrays is either a 0 (not an on/offset) or a 1 (a possible on/offset).\n\nThis function returns a new offsets array, where there is a one-to-one correlation between\nonsets and offsets, such that each onset has exactly one offset that occurs after it in\nthe time domain (the second dimension of the array).\n\nThe gradients array is used to decide which offset to use in the case of multiple possibilities.", "id": "f6457:m7"} {"signature": "def lowpass_filter(data, cutoff, fs, order=):", "body": "nyq = * fsnormal_cutoff = cutoff / nyqb, a = signal.butter(order, normal_cutoff, btype='', analog=False)y = signal.lfilter(b, a, data)return y", "docstring": "Does a lowpass filter over the given data.\n\n:param data: The data (numpy array) to be filtered.\n:param cutoff: The high cutoff in Hz.\n:param fs: The sample rate in Hz of the data.\n:param order: The order of the filter. The higher the order, the tighter the roll-off.\n:returns: Filtered data (numpy array).", "id": "f6458:m1"} {"signature": "async def process_succeed_response(self, request, response):", "body": "pass", "docstring": "Corresponding processing for the succeed response\n:param request: Request\n:param response: Response\n:return:", "id": "f6555:c0:m2"} {"signature": "def request(self,url: str,method: str = '',*,callback=None,encoding: typing.Optional[str] = None,headers: dict = None,metadata: dict = None,request_config: dict = None,request_session=None,**kwargs):", "body": "headers = headers or {}metadata = metadata or {}request_config = request_config or {}request_session = request_session or self.request_sessionheaders.update(self.headers.copy())request_config.update(self.request_config.copy())kwargs.update(self.kwargs.copy())return Request(url=url,method=method,callback=callback,encoding=encoding,headers=headers,metadata=metadata,request_config=request_config,request_session=request_session,**kwargs)", "docstring": "Init a Request class for crawling html", "id": "f6555:c1:m13"} {"signature": "async def _run_spider_hook(self, hook_func):", "body": "if callable(hook_func):try:aws_hook_func = hook_func(weakref.proxy(self))if isawaitable(aws_hook_func):await aws_hook_funcexcept Exception as e:self.logger.error(f'')", "docstring": "Run hook before/after spider start crawling\n:param hook_func: aws function\n:return:", "id": "f6555:c0:m0"} {"signature": "def __init__(self,middleware: typing.Union[typing.Iterable, Middleware] = None,loop=None,is_async_start: bool = False):", "body": "if not self.start_urls or not isinstance(self.start_urls,collections.Iterable):raise ValueError(\"\")self.callback_result_map = self.callback_result_map or {}self.request_config = self.request_config or {}self.headers = self.headers or {}self.metadata = self.metadata or 
{}self.kwargs = self.kwargs or {}self.request_config = self.request_config or {}self.is_async_start = is_async_startself.loop = loopasyncio.set_event_loop(self.loop)if isinstance(middleware, list):self.middleware = reduce(lambda x, y: x + y, middleware)else:self.middleware = middleware or Middleware()self.request_queue = asyncio.Queue()self.sem = asyncio.Semaphore(self.concurrency)", "docstring": "Init spider object.\n:param middleware: a single Middleware or a list of them\n:param loop:\n:param is_async_start:", "id": "f6555:c1:m0"} {"signature": "async def handle_request(self, request: Request) -> typing.Tuple[AsyncGeneratorType, Response]:", "body": "callback_result, response = None, Noneawait self._run_request_middleware(request)try:callback_result, response = await request.fetch_callback(self.sem)except NotImplementedParseError as e:self.logger.error(e)except NothingMatchedError as e:self.logger.error(f'')except Exception as e:self.logger.error(f'')await self._run_response_middleware(request, response)await self._process_response(request=request, response=response)return callback_result, response", "docstring": "Wrap request with middleware.\n:param request:\n:return:", "id": "f6555:c1:m10"} {"signature": "def __init__(self, default: str = '', many: bool = False):", "body": "self.default = defaultself.many = many", "docstring": "Init BaseField class\nurl: http://lxml.de/index.html\n:param default: default value\n:param many: if there are many fields in one page", "id": "f6558:c0:m0"} {"signature": "def __init__(self,css_select: str = None,xpath_select: str = None,default: str = None,many: bool = False):", "body": "super(_LxmlElementField, self).__init__(default=default, many=many)self.css_select = css_selectself.xpath_select = xpath_select", "docstring": ":param css_select: css select http://lxml.de/cssselect.html\n:param xpath_select: http://www.w3school.com.cn/xpath/index.asp\n:param default: inherit\n:param many: inherit", "id": "f6558:c1:m0"} {"signature": "async def clean_title(self, value):", "body": "return str(value).strip()", "docstring": "Clean the target data.\n:param value: the initial target data\n:return:", "id": "f6574:c0:m0"} {"signature": "def django_boolean_icon(field_val, alt_text=None, title=None):", "body": "BOOLEAN_MAPPING = {True: '', False: '', None: ''}alt_text = alt_text or BOOLEAN_MAPPING[field_val]if title is not None:title = '' % titleelse:title = ''return mark_safe('' %(settings.STATIC_URL, BOOLEAN_MAPPING[field_val], alt_text, title))", "docstring": "Return HTML code for a nice representation of true/false.", "id": "f6576:m0"} {"signature": "def ajax_editable_boolean(attr, short_description):", "body": "def _fn(self, item):return ajax_editable_boolean_cell(item, attr)_fn.allow_tags = True_fn.short_description = short_description_fn.editable_boolean_field = attrreturn _fn", "docstring": "Convenience function: Assign the return value of this method to a variable\nof your ModelAdmin class and put the variable name into list_display.\n\nExample::\n\n    class MyTreeEditor(TreeEditor):\n        list_display = ('__unicode__', 'active_toggle')\n\n        active_toggle = ajax_editable_boolean('active', _('is active'))", "id": "f6576:m3"} {"signature": "def has_change_permission(self, request, obj=None):", "body": "if settings.TREE_EDITOR_OBJECT_PERMISSIONS:opts = self.optsr = request.user.has_perm(opts.app_label + '' + opts.get_change_permission(), obj)else:r = Truereturn r and super(TreeEditor, self).has_change_permission(request, obj)", "docstring": 
"Implement a lookup for object level permissions. Basically the same as\nModelAdmin.has_change_permission, but also passes the obj parameter in.", "id": "f6576:c1:m8"} {"signature": "def _collect_editable_booleans(self):", "body": "if hasattr(self, ''):returnself._ajax_editable_booleans = {}for field in self.list_display:item = getattr(self.__class__, field, None)if not item:continueattr = getattr(item, '', None)if attr:def _fn(self, instance):return [ajax_editable_boolean_cell(instance, _fn.attr)]_fn.attr = attrresult_func = getattr(item, '', _fn)self._ajax_editable_booleans[attr] = result_func", "docstring": "Collect all fields marked as editable booleans. We do not\nwant the user to be able to edit arbitrary fields by crafting\nan AJAX request by hand.", "id": "f6576:c1:m3"} {"signature": "def _populate_contigs(self, contig_id, header, cov, sequence):", "body": "gc_kwargs = self._get_gc_content(sequence, len(sequence))logger.debug(\"\".format(gc_kwargs))self.contigs[contig_id] = {\"\": header,\"\": sequence,\"\": len(sequence),\"\": cov,**gc_kwargs}", "docstring": "Inserts data from a single contig into\\\n :py:attr:`~Assembly.contigs`.\n\n By providing a contig id, the original header, the coverage that\n is parsed from the header and the sequence, this method will\n populate the :py:attr:`~Assembly.contigs` attribute.\n\n Parameters\n ----------\n contig_id : int\n Arbitrary unique contig identifier.\n header : str\n Original header of the current contig.\n cov : float\n The contig coverage, parsed from the fasta header\n sequence : str\n The complete sequence of the contig.", "id": "f6596:c0:m4"} {"signature": "def get_assembly_length(self):", "body": "return sum([vals[\"\"] for contig_id, vals in self.contigs.items()if contig_id not in self.filtered_ids])", "docstring": "Returns the length of the assembly, without the filtered contigs.\n\n Returns\n -------\n x : int\n Total length of the assembly.", "id": "f6596:c0:m8"} {"signature": "@staticmethoddef _parse_coverage(header_str):", "body": "cov = Nonefor i in header_str.split(\"\")[::-]:try:cov = float(i)breakexcept ValueError:continuereturn cov", "docstring": "Attempts to retrieve the coverage value from the header string.\n\n It splits the header by \"_\" and then screens the list backwards in\n search of the first float value. This will be interpreted as the\n coverage value. If it cannot find a float value, it returns None.\n This search methodology is based on the strings of assemblers\n like spades and skesa that put the mean kmer coverage for each\n contig in its corresponding fasta header.\n\n Parameters\n ----------\n header_str : str\n String\n\n Returns\n -------\n float or None\n The coverage value for the contig. 
None if it cannot find the\n value in the provide string.", "id": "f6596:c0:m2"} {"signature": "def check_summary_health(summary_file, **kwargs):", "body": "fail_sensitive = kwargs.get(\"\", [\"\",\"\",\"\",\"\"])logger.debug(\"\".format(fail_sensitive))must_pass = kwargs.get(\"\", [\"\",\"\"])logger.debug(\"\".format(must_pass))warning_fail_sensitive = kwargs.get(\"\", [\"\",\"\",])warning_must_pass = kwargs.get(\"\", [\"\"])summary_info = get_summary(summary_file)health = Truefailed = []warning = []for cat, test in summary_info.items():logger.debug(\"\".format(cat, test))if cat in fail_sensitive and test == \"\":health = Falsefailed.append(\"\".format(cat, test))logger.error(\"\"\"\".format(cat))if cat in must_pass and test != \"\":health = Falsefailed.append(\"\".format(cat, test))logger.error(\"\".format(cat))if cat in warning_fail_sensitive and test == \"\":warning.append(\"\".format(cat))logger.warning(\"\"\"\".format(cat))if cat in warning_must_pass and test != \"\":warning.append(\"\".format(cat))logger.warning(\"\"\"\".format(cat))return health, failed, warning", "docstring": "Checks the health of a sample from the FastQC summary file.\n\n Parses the FastQC summary file and tests whether the sample is good\n or not. There are four categories that cannot fail, and two that\n must pass in order for the sample pass this check. If the sample fails\n the quality checks, a list with the failing categories is also returned.\n\n Categories that cannot fail::\n\n fail_sensitive = [\n \"Per base sequence quality\",\n \"Overrepresented sequences\",\n \"Sequence Length Distribution\",\n \"Per sequence GC content\"\n ]\n\n Categories that must pass::\n\n must_pass = [\n \"Per base N content\",\n \"Adapter Content\"\n ]\n\n Parameters\n ----------\n summary_file: str\n Path to FastQC summary file.\n\n Returns\n -------\n x : bool\n Returns ``True`` if the sample passes all tests. ``False`` if not.\n summary_info : list\n A list with the FastQC categories that failed the tests. Is empty\n if the sample passes all tests.", "id": "f6597:m6"} {"signature": "def trim_range(data_file):", "body": "logger.debug(\"\")target_nuc_bias = \"\"logger.debug(\"\"\"\".format(target_nuc_bias))gather = Falsebiased = []with open(data_file) as fh:for line in fh:if line.startswith(target_nuc_bias):logger.debug(\"\".format(line))next(fh)gather = Trueelif line.startswith(\"\") and gather:logger.debug(\"\".format(line))breakelif gather:g, a, t, c = [float(x) for x in line.strip().split()[:]]gc = (g + ) / (c + )at = (a + ) / (t + )if <= gc <= and <= at <= :biased.append(False)else:biased.append(True)logger.debug(\"\".format(biased))biased_5end, biased_3end = biased[:int(len(biased)/)],biased[int(len(biased)/):][::-]logger.debug(\"\")trim_nt = [, ]trim_nt[] = get_trim_index(biased_5end)logger.debug(\"\".format(trim_nt[]))trim_nt[] = len(biased) - get_trim_index(biased_3end)logger.debug(\"\".format(trim_nt[]))return trim_nt", "docstring": "Assess the optimal trim range for a given FastQC data file.\n\n This function will parse a single FastQC data file, namely the\n *'Per base sequence content'* category. It will retrieve the A/T and G/C\n content for each nucleotide position in the reads, and check whether the\n G/C and A/T proportions are between 80% and 120%. 
If they are, that\n nucleotide position is marked as biased for future removal.\n\n Parameters\n ----------\n data_file: str\n Path to FastQC data file.\n\n Returns\n -------\n trim_nt: list\n List containing the range with the best trimming positions for the\n corresponding FastQ file. The first element is the 5' end trim index\n and the second element is the 3' end trim index.", "id": "f6597:m3"} {"signature": "@MainWrapperdef main(sample_id, assembly_file, coverage_bp_file=None):", "body": "logger.info(\"\")assembly_obj = Assembly(assembly_file, sample_id)logger.info(\"\")assembly_obj.get_summary_stats(\"\".format(sample_id))size_dist = [len(x) for x in assembly_obj.contigs.values()]json_dic = {\"\": [{\"\": sample_id,\"\": [{\"\": \"\",\"\": assembly_obj.summary_info[\"\"],\"\": \"\",\"\": True},{\"\": \"\",\"\": assembly_obj.summary_info[\"\"],\"\": \"\",\"\": True},]}],\"\": [{\"\": sample_id,\"\": {\"\": size_dist}}]}if coverage_bp_file:try:window = gc_sliding_data = assembly_obj.get_gc_sliding(window=window)cov_sliding_data =assembly_obj.get_coverage_sliding(coverage_bp_file,window=window)total_bp = sum([sum(x) for x in assembly_obj.contig_coverage.values()])json_dic[\"\"][][\"\"][\"\"] = {\"\": gc_sliding_data,\"\": cov_sliding_data,\"\": window,\"\": assembly_obj._get_window_labels(window),\"\": os.path.basename(assembly_file)}json_dic[\"\"][][\"\"][\"\"] = total_bpexcept:logger.error(\"\"\"\".format(traceback.format_exc()))with open(\"\", \"\") as json_report:json_report.write(json.dumps(json_dic, separators=(\"\", \"\")))with open(\"\", \"\") as status_fh:status_fh.write(\"\")", "docstring": "Main executor of the assembly_report template.\n\n Parameters\n ----------\n sample_id : str\n Sample Identification string.\n assembly_file : str\n Path to assembly file in Fasta format.", "id": "f6598:m1"} {"signature": "@staticmethoddef _get_contig_id(contig_str):", "body": "contig_id = contig_strtry:contig_id = re.search(\"\", contig_str).group()except AttributeError:passtry:contig_id = re.search(\"\", contig_str).group()except AttributeError:passreturn contig_id", "docstring": "Tries to retrieve contig id. 
Returns the original string if it\n is unable to retrieve the id.\n\n Parameters\n ----------\n contig_str : str\n Full contig string (fasta header)\n\n Returns\n -------\n str\n Contig id", "id": "f6598:c0:m2"} {"signature": "def get_gc_sliding(self, window=):", "body": "gc_res = []complete_seq = \"\".join(self.contigs.values()).lower()for i in range(, len(complete_seq), window):seq_window = complete_seq[i:i + window]gc_res.append(round(self._gc_prop(seq_window, len(seq_window)), ))return gc_res", "docstring": "Calculates a sliding window of the GC content for the assembly\n\n\n Returns\n -------\n gc_res : list\n List of GC proportion floats for each data point in the sliding\n window", "id": "f6598:c0:m6"} {"signature": "@MainWrapperdef main(mash_output, hash_cutoff, sample_id, assembly_file):", "body": "input_f = open(mash_output, \"\")master_dict = {}for line in input_f:tab_split = line.split(\"\")current_seq = tab_split[].strip()ref_accession = \"\".join(tab_split[].strip().split(\"\")[:])mash_dist = tab_split[].strip()hashes_list = tab_split[-].strip().split(\"\")perc_hashes = float(hashes_list[]) / float(hashes_list[])if ref_accession in master_dict.keys():current_seq += \"\".format(master_dict[ref_accession][-])if perc_hashes > float(hash_cutoff):master_dict[ref_accession] = [round( - float(mash_dist), ),round(perc_hashes, ),current_seq]send_to_output(master_dict, mash_output, sample_id, assembly_file)", "docstring": "Main function that allows to dump a mash dist txt file to a json file\n\nParameters\n----------\nmash_output: str\n A string with the input file.\nhash_cutoff: str\n the percentage cutoff for the percentage of shared hashes between query\n and plasmid in database that is allowed for the plasmid to be reported\n to the results outputs\nsample_id: str\n The name of the sample.", "id": "f6599:m1"} {"signature": "def clean_up(fastq):", "body": "for fq in fastq:rp = os.path.realpath(fq)logger.debug(\"\".format(rp))if re.match(\"\", rp):os.remove(rp)", "docstring": "Cleans the temporary fastq files. 
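``get_gc_sliding`` above is a plain fixed-window scan over the concatenated assembly, recording the GC fraction of each window. A minimal standalone equivalent could look like the sketch below; the 2000 bp default window and the inline GC computation stand in for the elided default value and the ``_gc_prop`` helper::

    def gc_sliding(sequence, window=2000):
        # Return the GC proportion of each non-overlapping window of `sequence`.
        sequence = sequence.lower()
        gc_res = []
        for i in range(0, len(sequence), window):
            chunk = sequence[i:i + window]
            gc = chunk.count("g") + chunk.count("c")
            gc_res.append(round(gc / len(chunk), 4))
        return gc_res

    # Usage: join the assembly contigs and slide over the result.
    contigs = {"contig_1": "ATGCGC" * 1000, "contig_2": "ATAT" * 500}
    print(gc_sliding("".join(contigs.values()), window=1000))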
If they are symlinks, the link\nsource is removed\n\nParameters\n----------\nfastq : list\n List of fastq files.", "id": "f6600:m2"} {"signature": "@MainWrapperdef main(sample_id, fastq_pair, max_len, kmer, opts, clear, disable_rr):", "body": "logger.info(\"\")min_coverage, min_kmer_coverage = optslogger.info(\"\")kmers = set_kmers(kmer, max_len)logger.info(\"\".format(kmers))cli = [\"\",\"\",\"\",\"\",\"\",\"\",min_coverage,\"\",\"\"]if kmers:cli += [\"\".format(\"\".join([str(x) for x in kmers]))]cli += [\"\",fastq_pair[],\"\",fastq_pair[]]if disable_rr == '':cli += ['']logger.debug(\"\".format(cli))p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)stdout, stderr = p.communicate()try:stderr = stderr.decode(\"\")stdout = stdout.decode(\"\")except (UnicodeDecodeError, AttributeError):stderr = str(stderr)stdout = str(stdout)logger.info(\"\"\"\".format(stdout))logger.info(\"\"\"\".format(stderr))logger.info(\"\".format(p.returncode))with open(\"\", \"\") as fh:if p.returncode != :fh.write(\"\")sys.exit(p.returncode)else:fh.write(\"\")if \"\" in fastq_pair[]:sample_id += \"\"info = __get_version_spades()assembly_file = \"\".format(sample_id, info[\"\"].replace(\"\", \"\"))os.rename(\"\", assembly_file)logger.info(\"\".format(assembly_file))if clear == \"\" and os.path.exists(assembly_file):clean_up(fastq_pair)", "docstring": "Main executor of the spades template.\n\n Parameters\n ----------\n sample_id : str\n Sample Identification string.\n fastq_pair : list\n Two element list containing the paired FastQ files.\n max_len : int\n Maximum read length. This value is determined in\n :py:class:`templates.integrity_coverage`\n kmer : str\n Can be either ``'auto'``, ``'default'`` or a\n sequence of space separated integers, ``'23, 45, 67'``.\n opts : List of options for spades execution. See above.\n clear : str\n Can be either 'true' or 'false'. If 'true', the input fastq files will\n be removed at the end of the run, IF they are in the working directory\n disable_rr : str\n Can either be 'true' or 'false'. If 'true', disables repeat resolution \n stage of assembling", "id": "f6600:m3"} {"signature": "def clean_up(fastq):", "body": "for fq in fastq:rp = os.path.realpath(fq)logger.debug(\"\".format(rp))if re.match(\"\", rp):os.remove(rp)", "docstring": "Cleans the temporary fastq files. If they are symlinks, the link\nsource is removed\n\nParameters\n----------\nfastq : list\n List of fastq files.", "id": "f6601:m1"} {"signature": "def evaluate_min_coverage(coverage_opt, assembly_coverage, assembly_size):", "body": "if coverage_opt == \"\":min_coverage = (assembly_coverage / assembly_size) * logger.info(\"\"\"\".format(min_coverage))if min_coverage < :logger.info(\"\"\"\")min_coverage = else:min_coverage = int(coverage_opt)logger.info(\"\".format(min_coverage))return min_coverage", "docstring": "Evaluates the minimum coverage threshold from the value provided in\n the coverage_opt.\n\n Parameters\n ----------\n coverage_opt : str or int or float\n If set to \"auto\" it will try to automatically determine the coverage\n to 1/3 of the assembly size, to a minimum value of 10. If it set\n to a int or float, the specified value will be used.\n assembly_coverage : int or float\n The average assembly coverage for a genome assembly. This value\n is retrieved by the `:py:func:parse_coverage_table` function.\n assembly_size : int\n The size of the genome assembly. 
This value is retrieved by the\n `py:func:get_assembly_size` function.\n\n Returns\n -------\n x: int\n Minimum coverage threshold.", "id": "f6602:m7"} {"signature": "def filter_assembly(assembly_file, minimum_coverage, coverage_info,output_file):", "body": "write_flag = Falsewith open(assembly_file) as fh, open(output_file, \"\") as out_fh:for line in fh:if line.startswith(\">\"):write_flag = Falseheader = line.strip()[:]contig_cov = coverage_info[header][\"\"]if contig_cov >= minimum_coverage:write_flag = Trueout_fh.write(line)elif write_flag:out_fh.write(line)", "docstring": "Generates a filtered assembly file.\n\n This function generates a filtered assembly file based on an original\n assembly and a minimum coverage threshold.\n\n Parameters\n ----------\n assembly_file : str\n Path to original assembly file.\n minimum_coverage : int or float\n Minimum coverage required for a contig to pass the filter.\n coverage_info : OrderedDict or dict\n Dictionary containing the coverage information for each contig.\n output_file : str\n Path where the filtered assembly file will be generated.", "id": "f6602:m3"} {"signature": "def parse_coverage_table(coverage_file):", "body": "coverage_dict = OrderedDict()total_cov = with open(coverage_file) as fh:for line in fh:contig, cov = line.strip().split()coverage_dict[contig] = {\"\": int(cov)}total_cov += int(cov)logger.debug(\"\"\"\".format(contig, cov))return coverage_dict, total_cov", "docstring": "Parses a file with coverage information into objects.\n\n This function parses a TSV file containing coverage results for\n all contigs in a given assembly and will build an ``OrderedDict``\n with the information about their coverage and length. The length\n information is actually gathered from the contig header using a\n regular expression that assumes the usual header produced by Spades::\n\n contig_len = int(re.search(\"length_(.+?)_\", line).group(1))\n\n Parameters\n ----------\n coverage_file : str\n Path to TSV file containing the coverage results.\n\n Returns\n -------\n coverage_dict : OrderedDict\n Contains the coverage and length information for each contig.\n total_size : int\n Total size of the assembly in base pairs.\n total_cov : int\n Sum of coverage values across all contigs.", "id": "f6602:m2"} {"signature": "def get_assembly_length(self):", "body": "return sum([vals[\"\"] for contig_id, vals in self.contigs.items()if contig_id not in self.filtered_ids])", "docstring": "Returns the length of the assembly, without the filtered contigs.\n\n Returns\n -------\n x : int\n Total length of the assembly.", "id": "f6604:c0:m7"} {"signature": "def write_report(self, output_file):", "body": "logger.debug(\"\".format(output_file))with open(output_file, \"\") as fh:for contig_id, vals in self.report.items():fh.write(\"\".format(contig_id, vals))", "docstring": "Writes a report with the test results for the current assembly\n\n Parameters\n ----------\n output_file : str\n Name of the output assembly file.", "id": "f6604:c0:m9"} {"signature": "def get_json_info(fields, header):", "body": "json_dic = dict((x, y) for x, y in zip(header, fields))return json_dic", "docstring": "Parameters\n----------\nfields\n\nReturns\n-------", "id": "f6605:m0"} {"signature": "def parse_log(log_file):", "body": "template = OrderedDict([(\"\", ),(\"\", ),(\"\", ),(\"\", ),(\"\", ),(\"\", )])with open(log_file) as fh:for line in fh:fields = [int(x) for x in line.strip().split()[-:]]if not fields[]:template[\"\"] += template[\"\"] += fields[]template[\"\"] += 
fields[]template[\"\"] += fields[] + fields[]template[\"\"] += fields[]total_len = template[\"\"] + template[\"\"]if total_len:template[\"\"] = round((template[\"\"] / total_len) * , )else:template[\"\"] = return template", "docstring": "Retrieves some statistics from a single Trimmomatic log file.\n\n This function parses Trimmomatic's log file and stores some trimming\n statistics in an :py:class:`OrderedDict` object. This object contains\n the following keys:\n\n - ``clean_len``: Total length after trimming.\n - ``total_trim``: Total trimmed base pairs.\n - ``total_trim_perc``: Total trimmed base pairs in percentage.\n - ``5trim``: Total base pairs trimmed at 5' end.\n - ``3trim``: Total base pairs trimmed at 3' end.\n\n Parameters\n ----------\n log_file : str\n Path to trimmomatic log file.\n\n Returns\n -------\n x : :py:class:`OrderedDict`\n Object storing the trimming statistics.", "id": "f6606:m1"} {"signature": "def merge_default_adapters():", "body": "default_adapters = [os.path.join(ADAPTERS_PATH, x) for x inos.listdir(ADAPTERS_PATH)]filepath = os.path.join(os.getcwd(), \"\")with open(filepath, \"\") as fh,fileinput.input(default_adapters) as in_fh:for line in in_fh:fh.write(\"\".format(line, \"\"))return filepath", "docstring": "Merges the default adapters file in the trimmomatic adapters directory\n\n Returns\n -------\n str\n Path with the merged adapters file.", "id": "f6606:m5"} {"signature": "def set_kmers(kmer_opt, max_read_len):", "body": "logger.debug(\"\".format(kmer_opt))if kmer_opt == \"\":if max_read_len >= :kmers = [, , , , ]else:kmers = [, , , , ]logger.debug(\"\"\"\".format(max_read_len, kmers))elif len(kmer_opt.split()) > :kmers = kmer_opt.split()if kmers[]< or kmers[-]> or is_odd(kmers):kmers = []logger.debug(\"\"\"\")else:logger.debug(\"\".format(kmers))else:kmers = []logger.debug(\"\"\"\")return kmers", "docstring": "Returns a kmer list based on the provided kmer option and max read len.\n\n Parameters\n ----------\n kmer_opt : str\n The k-mer option. 
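The Trimmomatic ``parse_log`` function above reduces to accumulating a few counters over the trim log and computing a trimmed percentage at the end. The standalone sketch below assumes a trim-log line layout of read name, surviving length, 5' trim, last surviving base and 3' trim; the exact column indices are elided in the original body, so treat that mapping as an assumption::

    from collections import OrderedDict

    def summarise_trim_log(log_lines):
        # Keys mirror those documented for parse_log.
        stats = OrderedDict([("clean_len", 0), ("total_trim", 0),
                             ("total_trim_perc", 0), ("5trim", 0), ("3trim", 0)])
        for line in log_lines:
            surviving, trim5, _, trim3 = [int(x) for x in line.split()[-4:]]
            stats["clean_len"] += surviving
            stats["5trim"] += trim5
            stats["3trim"] += trim3
            stats["total_trim"] += trim5 + trim3
        total_len = stats["clean_len"] + stats["total_trim"]
        if total_len:
            stats["total_trim_perc"] = round(stats["total_trim"] / total_len * 100, 2)
        return stats

    print(summarise_trim_log(["read_1 90 5 95 5", "read_2 100 0 100 0"]))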
Can be either ``'auto'``, ``'default'`` or a\n sequence of space separated integers, ``'23, 45, 67'``.\n max_read_len : int\n The maximum read length of the current sample.\n\n Returns\n -------\n kmers : list\n List of k-mer values that will be provided to megahit.", "id": "f6608:m2"} {"signature": "@MainWrapperdef main(fastq_pair, adapter_file, cpus):", "body": "logger.info(\"\")if os.path.exists(adapter_file):logger.info(\"\".format(adapter_file))adapters = convert_adatpers(adapter_file)else:logger.info(\"\"\"\".format(adapter_file))adapters = Nonecli = [\"\",\"\",\"\",\"\",\"\",\"\",str(cpus)]if adapters:cli += [\"\", \"\".format(adapters)]cli += fastq_pairlogger.debug(\"\".format(cli))p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE, shell=False)stdout, stderr = p.communicate()try:stderr = stderr.decode(\"\")except (UnicodeDecodeError, AttributeError):stderr = str(stderr)logger.info(\"\"\"\".format(stdout))logger.info(\"\"\"\".format(stderr))logger.info(\"\".format(p.returncode))logger.info(\"\")with open(\"\", \"\") as status_fh:for fastq in fastq_pair:fpath = join(fastq.rsplit(\"\", )[] + \"\",\"\")logger.debug(\"\".format(fpath))if not exists(fpath):logger.warning(\"\".format(fpath))status_fh.write(\"\")returnlogger.debug(\"\".format(fpath))status_fh.write(\"\")logger.info(\"\")for i, fastq in enumerate(fastq_pair):fastqc_dir = fastq.rsplit(\"\", )[] + \"\"summary_file = join(fastqc_dir, \"\")logger.debug(\"\".format(summary_file))fastqc_data_file = join(fastqc_dir, \"\")logger.debug(\"\".format(fastqc_data_file))os.rename(fastqc_data_file, \"\".format(i + ))os.rename(summary_file, \"\".format(i + ))", "docstring": "Main executor of the fastq template.\n\n Parameters\n ----------\n fastq_pair : list\n Two element list containing the paired FastQ files.\n adapter_file : str\n Path to adapters file.\n cpus : int or str\n Number of cpu's that will be by FastQC.", "id": "f6611:m2"} {"signature": "@MainWrapperdef main(sample_id, assembly, min_size):", "body": "logger.info(\"\")f_open = open(assembly, \"\")entry = (x[] for x in groupby(f_open, lambda line: line[] == \">\"))success = for header in entry:headerStr = header.__next__()[:].strip()seq = \"\".join(s.strip() for s in entry.__next__())if len(seq) >= min_size:with open(sample_id + '' + headerStr.replace(\"\",\"\").replace(\"\",\"\") + '', \"\") as output_file:output_file.write(\">\" + sample_id + \"\" + headerStr.replace(\"\",\"\").replace(\"\",\"\") + \"\" + seq + \"\")success += f_open.close()logger.info(\"\".format(success))", "docstring": "Main executor of the split_fasta template.\n\n Parameters\n ----------\n sample_id : str\n Sample Identification string.\n assembly : list\n Assembly file.\n min_size : int\n Minimum contig size.", "id": "f6613:m0"} {"signature": "def get_plot_data(self):", "body": "json_dic = {\"\": []}sample_dic = {}sample_assembly_map = {}for entry in self.storage.values():sample_id = re.match(\"\", entry[\"\"]).groups()[]if sample_id not in sample_dic:sample_dic[sample_id] = {}contig_id = self._get_contig_id(entry[\"\"])database = entry[\"\"]if database not in sample_dic[sample_id]:sample_dic[sample_id][database] = []if sample_id not in sample_assembly_map:sample_assembly_map[sample_id] = entry[\"\"]sample_dic[sample_id][database].append({\"\": contig_id,\"\": entry[\"\"],\"\": entry[\"\"].replace(\"\", \"\"),\"\": entry[\"\"],\"\": entry[\"\"],\"\": entry[\"\"],},)for sample, data in sample_dic.items():json_dic[\"\"].append({\"\": sample,\"\": {\"\": data},\"\": sample_assembly_map[sample]})return 
json_dic", "docstring": "Generates the JSON report to plot the gene boxes\n\n Following the convention of the reports platform, this method returns\n a list of JSON/dict objects with the information about each entry in\n the abricate file. The information contained in this JSON is::\n\n {contig_id: ,\n seqRange: [, ],\n gene: ,\n accession: ,\n coverage: ,\n identity: \n }\n\n Note that the `seqRange` entry contains the position in the\n corresponding contig, not the absolute position in the whole assembly.\n\n Returns\n -------\n json_dic : list\n List of JSON/dict objects with the report data.", "id": "f6615:c1:m2"} {"signature": "def parse_log(self, bowtie_log):", "body": "print(\"\")regexes = {'': {'': r\"\",'': r\"\",'': r\"\"},'': {'': r\"\",'': r\"\",'': r\"\",'': r\"\",'': r\"\",'': r\"\",'': r\"\",'': r\"\"}}with open(bowtie_log, \"\") as f:for l in f:print(l)total = re.search(r\"\", l)print(total)if total:print(total)self.set_n_reads(total.group())paired = re.search(r\"\", l)if paired:paired_total = int(paired.group())paired_numbers = {}l = f.readline()while l.startswith(''):for k, r in regexes[''].items():match = re.search(r, l)if match:paired_numbers[k] = int(match.group())l = f.readline()align_zero_times = paired_numbers[''] + paired_numbers['']if align_zero_times:self.set_align_0x(align_zero_times)align_one_time = paired_numbers[''] + paired_numbers['']if align_one_time:self.set_align_1x(align_one_time)align_more_than_one_time = paired_numbers[''] + paired_numbers['']if align_more_than_one_time:self.set_align_mt1x(align_more_than_one_time)overall = re.search(r\"\", l)if overall:self.overall_rate = float(overall.group())", "docstring": "Parse a bowtie log file.\n\n This is a bowtie log parsing method that populates the\n :py:attr:`self.n_reads, self.align_0x, self.align_1x, self.align_mt1x and self.overall_rate` attributes with\n data from the log file.\n\n Disclamer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.\n\n The insertion of data on the attribytes is done by the\n :py:meth:`set_attribute method.\n\n Parameters\n ----------\n bowtie_log : str\n Path to the boetie log file.", "id": "f6616:c0:m6"} {"signature": "@MainWrapperdef main(sample_id, bowite_log):", "body": "logger.info(\"\")warnings = []fails = \"\"bowtie_info = Bowtie(sample_id, bowite_log)print(bowtie_info.overall_rate)with open(\"\", \"\") as json_report:json_dic = {\"\": [{\"\": sample_id,\"\": [{\"\": \"\",\"\": int(bowtie_info.n_reads),\"\": \"\",\"\": False},{\"\": \"\",\"\": int(bowtie_info.align_0x),\"\": \"\",\"\": False},{\"\": \"\",\"\": int(bowtie_info.align_1x),\"\": \"\",\"\": False},{\"\": \"\",\"\": int(bowtie_info.align_mt1x),\"\": \"\",\"\": False},{\"\": \"\",\"\": float(bowtie_info.overall_rate),\"\": \"\",\"\": False}]}],}if warnings:json_dic[\"\"] = [{\"\": sample_id,\"\": \"\",\"\": warnings}]if fails:json_dic[\"\"] = [{\"\": sample_id,\"\": \"\",\"\": [fails]}]json_report.write(json.dumps(json_dic, separators=(\"\", \"\")))with open(\"\", \"\") as status_fh:status_fh.write(\"\")", "docstring": "Main executor of the process_mapping template.\n\n Parameters\n ----------\n sample_id : str\n Sample Identification string.\n boetie_log: str\n Path to the log file generated by bowtie.", "id": "f6616:m0"} {"signature": "def depth_file_reader(depth_file):", "body": "depth_dic_coverage = {}for line in depth_file:tab_split = line.split() reference = \"\".join(tab_split[].strip().split(\"\")[:]) position = tab_split[]num_reads_align = float(tab_split[].rstrip())if reference not in 
depth_dic_coverage:depth_dic_coverage[reference] = {}depth_dic_coverage[reference][position] = num_reads_alignlogger.info(\"\")depth_file.close()logger.debug(\"\".format(asizeof(depth_dic_coverage)/))return depth_dic_coverage", "docstring": "Function that parse samtools depth file and creates 3 dictionaries that\nwill be useful to make the outputs of this script, both the tabular file\nand the json file that may be imported by pATLAS\n\nParameters\n----------\ndepth_file: textIO\n the path to depth file for each sample\n\nReturns\n-------\ndepth_dic_coverage: dict\n dictionary with the coverage per position for each plasmid", "id": "f6617:m0"} {"signature": "def get_encodings_in_range(rmin, rmax):", "body": "valid_encodings = []valid_phred = []for encoding, (phred, (emin, emax)) in RANGES.items():if rmin >= emin and rmax <= emax:valid_encodings.append(encoding)valid_phred.append(phred)return valid_encodings, valid_phred", "docstring": "Returns the valid encodings for a given encoding range.\n\n The encoding ranges are stored in the :py:data:`RANGES` dictionary, with\n the encoding name as a string and a list as a value containing the\n phred score and a tuple with the encoding range. For a given encoding\n range provided via the two first arguments, this function will return\n all possible encodings and phred scores.\n\n Parameters\n ----------\n rmin : int\n Minimum Unicode code in range.\n rmax : int\n Maximum Unicode code in range.\n\n Returns\n -------\n valid_encodings : list\n List of all possible encodings for the provided range.\n valid_phred : list\n List of all possible phred scores.", "id": "f6618:m2"} {"signature": "def build_versions(self):", "body": "version_storage = []template_version = self.context.get(\"\", None)template_program = self.context.get(\"\", None)template_build = self.context.get(\"\", None)if template_version and template_program and template_build:if self.logger:self.logger.debug(\"\"\"\".format(template_program,template_version,template_build))version_storage.append({\"\": template_program,\"\": template_version,\"\": template_build})for var, obj in self.context.items():if var.startswith(\"\"):ver = obj()version_storage.append(ver)if self.logger:self.logger.debug(\"\"\"\".format(ver))with open(\"\", \"\") as fh:fh.write(json.dumps(version_storage, separators=(\"\", \"\")))", "docstring": "Writes versions JSON for a template file\n\n This method creates the JSON file ``.versions`` based on the metadata\n and specific functions that are present in a given template script.\n\n It starts by fetching the template metadata, which can be specified\n via the ``__version__``, ``__template__`` and ``__build__``\n attributes. 
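``get_encodings_in_range`` above is essentially a containment test against a table of known FastQ quality encodings. The sketch below shows that idea with an illustrative ``RANGES`` table; the encoding names and numeric ranges here are stand-ins for the module-level dictionary, whose actual values are elided::

    # Hypothetical stand-in for the module's RANGES dictionary:
    # encoding name -> [phred offset, (min code, max code)]
    RANGES = {
        "Sanger": [33, (33, 73)],
        "Illumina-1.8": [33, (33, 74)],
        "Illumina-1.3": [64, (64, 104)],
    }

    def encodings_in_range(rmin, rmax, ranges=RANGES):
        valid_encodings, valid_phred = [], []
        for encoding, (phred, (emin, emax)) in ranges.items():
            if rmin >= emin and rmax <= emax:
                valid_encodings.append(encoding)
                valid_phred.append(phred)
        return valid_encodings, valid_phred

    # A quality string whose codes span 35..70 fits both 33-offset encodings.
    print(encodings_in_range(35, 70))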
If all of these attributes exist, it starts to populate\n a JSON/dict array (Note that the absence of any one of them will\n prevent the version from being written).\n\n Then, it will search the\n template scope for functions that start with the substring\n ``__set_version`` (For example ``def __set_version_fastqc()`).\n These functions should gather the version of\n an arbitrary program and return a JSON/dict object with the following\n information::\n\n {\n \"program\": ,\n \"version\": \n \"build\": \n }\n\n This JSON/dict object is then written in the ``.versions`` file.", "id": "f6619:c0:m2"} {"signature": "def _build_connections(self, process_list, ignore_dependencies,auto_dependency):", "body": "logger.debug(\"\")logger.debug(\"\")logger.debug(\"\")logger.debug(\"\".format(process_list))for p, con in enumerate(process_list):logger.debug(\"\".format(p, con))in_lane = con[\"\"][\"\"]out_lane = con[\"\"][\"\"]logger.debug(\"\".format(p, in_lane))logger.debug(\"\".format(p, out_lane))if out_lane > self.lanes:self.lanes = out_lanep_in_name, p_out_name, out_directives = self._get_process_names(con, p)if p_out_name not in self.process_map:logger.error(colored_print(\"\".format(p_out_name), \"\"))guess_process(p_out_name, self.process_map)sys.exit()out_process = self.process_map[p_out_name](template=p_out_name)if out_directives:out_process.update_attributes(out_directives)input_suf = \"\".format(in_lane, p)output_suf = \"\".format(out_lane, p)logger.debug(\"\"\"\".format(p, input_suf, output_suf))out_process.set_main_channel_names(input_suf, output_suf, out_lane)if p_in_name != \"\":in_process = self.process_map[p_in_name](template=p_in_name)logger.debug(\"\"\"\".format(p))self._test_connection(in_process, out_process)out_process.parent_lane = in_laneelse:out_process.parent_lane = Nonelogger.debug(\"\".format(p, out_process.parent_lane))if in_lane != out_lane:logger.debug(\"\"\"\".format(p))self._fork_tree[in_lane].append(out_lane)try:parent_process = [x for x in self.processes if x.lane == in_lane andx.template == p_in_name][]logger.debug(\"\"\"\".format(p, parent_process,out_process.input_channel))parent_process.update_main_forks(out_process.input_channel)except IndexError:passelse:parent_process = self.processes[-]if parent_process.lane and parent_process.lane != out_lane:parent_process = [x for x in self.processes[::-]if x.lane == out_lane][]if parent_process.output_channel:logger.debug(\"\"\"\".format(p, parent_process.output_channel))out_process.input_channel = parent_process.output_channelif out_process.dependencies and not ignore_dependencies:logger.debug(\"\"\"\".format(p, p_out_name,out_process.dependencies))parent_lanes = self._get_fork_tree(out_lane)for dep in out_process.dependencies:if not self._search_tree_backwards(dep, parent_lanes):if auto_dependency:self._add_dependency(out_process, dep, in_lane, out_lane, p)elif not self.export_parameters:logger.error(colored_print(\"\"\"\".format(p_out_name, dep),\"\"))sys.exit()self.processes.append(out_process)logger.debug(\"\".format(self.processes))logger.debug(\"\".format(self._fork_tree))", "docstring": "Parses the process connections dictionaries into a process list\n\n This method is called upon instantiation of the NextflowGenerator\n class. 
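Any ``__set_version_*`` function picked up by ``build_versions`` above only has to return a small dict with ``program``, ``version`` and ``build`` keys. A hypothetical example, not taken from the codebase, might call the tool and parse its version string::

    import subprocess

    def __set_version_fastqc():
        # Hypothetical collector following the build_versions contract.
        try:
            out = subprocess.run(["fastqc", "--version"], capture_output=True,
                                 text=True, check=True).stdout
            # Output is assumed to resemble 'FastQC v0.11.9'.
            version = out.strip().split()[-1].lstrip("v")
        except (OSError, subprocess.CalledProcessError):
            version = "undefined"
        return {"program": "fastqc", "version": version, "build": "1"}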
Essentially, it sets the main input/output channel names of the\n processes so that they can be linked correctly.\n\n If a connection between two consecutive process is not possible due\n to a mismatch in the input/output types, it exits with an error.\n\n Returns\n -------", "id": "f6624:c0:m2"} {"signature": "def _update_secondary_channels(self, p):", "body": "if p.link_start:logger.debug(\"\".format(p.template, p.link_start))for l in p.link_start:if l in self.secondary_channels:self.secondary_channels[l][p.lane] = {\"\": p, \"\": []}else:self.secondary_channels[l] = {p.lane: {\"\": p, \"\": []}}if p.link_end:logger.debug(\"\".format(p.template, p.link_end))for l in p.link_end:parent_forks = self._get_fork_tree(p.lane)if l[\"\"].startswith(\"\"):self._set_implicit_link(p, l)continueif l[\"\"] not in self.secondary_channels:continuefor lane in parent_forks:if lane in self.secondary_channels[l[\"\"]]:self.secondary_channels[l[\"\"]][lane][\"\"].append(\"\".format(\"\".format(l[\"\"], p.pid)))logger.debug(\"\".format(p.template, self.secondary_channels))", "docstring": "Given a process, this method updates the\n :attr:`~Process.secondary_channels` attribute with the corresponding\n secondary inputs of that channel.\n\n The rationale of the secondary channels is the following:\n\n - Start storing any secondary emitting channels, by checking the\n `link_start` list attribute of each process. If there are\n channel names in the link start, it adds to the secondary\n channels dictionary.\n - Check for secondary receiving channels, by checking the\n `link_end` list attribute. If the link name starts with a\n `__` signature, it will created an implicit link with the last\n process with an output type after the signature. Otherwise,\n it will check is a corresponding link start already exists in\n the at least one process upstream of the pipeline and if so,\n it will update the ``secondary_channels`` attribute with the\n new link.\n\n Parameters\n ----------\n p : flowcraft.Process.Process", "id": "f6624:c0:m13"} {"signature": "def _get_process_names(self, con, pid):", "body": "try:_p_in_name = con[\"\"][\"\"]p_in_name, _ = self._parse_process_name(_p_in_name)logger.debug(\"\".format(pid, p_in_name))_p_out_name = con[\"\"][\"\"]p_out_name, out_directives = self._parse_process_name(_p_out_name)logger.debug(\"\".format(pid, p_out_name))except eh.ProcessError as ex:logger.error(colored_print(ex.value, \"\"))sys.exit()return p_in_name, p_out_name, out_directives", "docstring": "Returns the input/output process names and output process directives\n\n Parameters\n ----------\n con : dict\n Dictionary with the connection information between two processes.\n\n Returns\n -------\n input_name : str\n Name of the input process\n output_name : str\n Name of the output process\n output_directives : dict\n Parsed directives from the output process", "id": "f6624:c0:m3"} {"signature": "def _update_extra_inputs(self, p):", "body": "if p.extra_input:logger.debug(\"\".format(p.template, p.extra_input))if p.extra_input == \"\":if p.input_type in self.main_raw_inputs:logger.error(colored_print(\"\"\"\"\"\"\"\".format(p.input_type, p.template), \"\"))sys.exit()param = p.input_typeelse:param = p.extra_inputdest_channel = \"\".format(p.template, p.pid)if param not in self.extra_inputs:self.extra_inputs[param] = {\"\": p.input_type,\"\": [dest_channel]}else:if self.extra_inputs[param][\"\"] != p.input_type:logger.error(colored_print(\"\"\"\"\"\"\"\".format(p.input_type, 
p.template,self.extra_inputs[param][\"\"]),\"\"))sys.exit()self.extra_inputs[param][\"\"].append(dest_channel)logger.debug(\"\"\"\".format(p.template, param,self.extra_inputs[param]))p.update_main_input(\"\".format(p.input_channel, dest_channel))", "docstring": "Given a process, this method updates the\n :attr:`~Process.extra_inputs` attribute with the corresponding extra\n inputs of that process\n\n Parameters\n ----------\n p : flowcraft.Process.Process", "id": "f6624:c0:m10"} {"signature": "def render_pipeline(self):", "body": "dict_viz = {\"\": \"\",\"\": []}last_of_us = {}f_tree = self._fork_tree if self._fork_tree else {: []}for x, (k, v) in enumerate(f_tree.items()):for p in self.processes[:]:if x == and p.lane not in [k] + v:continueif x > and p.lane not in v:continueif not p.parent_lane:lst = dict_viz[\"\"]else:lst = last_of_us[p.parent_lane]tooltip = {\"\": \"\".format(p.template, p.pid),\"\": {\"\": p.pid,\"\": p.input_type,\"\": p.output_type if p.output_type else \"\",\"\": p.lane,},\"\": []}dir_var = \"\"for k2, v2 in p.directives.items():dir_var += k2for d in v2:try:directive = v2[d].replace(\"\", \"\").replace('', '')if isinstance(v2[d], str) else v2[d]dir_var += \"\".format(d, directive)except KeyError:passif dir_var:tooltip[\"\"][\"\"] = dir_varelse:tooltip[\"\"][\"\"] = \"\"lst.append(tooltip)last_of_us[p.lane] = lst[-][\"\"]self.dag_to_file(dict_viz)with open(os.path.join(dirname(self.nf_file),\"\"), \"\") as fh:fh.write(json.dumps(self._fork_tree))return self._render_config(\"\", {\"\": dict_viz})", "docstring": "Write pipeline attributes to json\n\n This function writes the pipeline and their attributes to a json file,\n that is intended to be read by resources/pipeline_graph.html to render\n a graphical output showing the DAG.", "id": "f6624:c0:m31"} {"signature": "def fetch_docker_tags(self):", "body": "dict_of_parsed = {}terminal_width = shutil.get_terminal_size().columns - center_string = \"\"tags_list = [[\"\" * int(terminal_width / ),\"\".format(\"\" * int(((terminal_width/ - len(center_string)) / )),center_string),\"\".format(\"\" * int(terminal_width / ))],[\"\", \"\", \"\"],[\"\" * int(terminal_width / ),\"\" * int(terminal_width / ),\"\" * int(terminal_width / )]]for p in self.processes[:]:template = p.templateif template in dict_of_parsed:continuedict_of_parsed[template] = {\"\": []}for directives in p.directives.values():try:repo = directives[\"\"]default_version = directives[\"\"]except KeyError:repo = \"\"default_version = \"\"repo_version = repo + default_versionif repo_version not in dict_of_parsed[template][\"\"]:r = requests.get(\"\".format(repo))if r.status_code != :r_content = json.loads(r.content)[\"\"]for version in r_content:printed_version = (version[\"\"] + \"\")if version[\"\"] == default_versionelse version[\"\"]tags_list.append([template, repo, printed_version])else:tags_list.append([template, repo, \"\"])dict_of_parsed[template][\"\"].append(repo_version)for x, entry in enumerate(tags_list):color = \"\" if x < else(\"\" if x % != else \"\")final_width = [int(terminal_width/),int(terminal_width/),int(terminal_width/)]sys.stdout.write(colored_print(\"\".format(*entry, *final_width), color))sys.stdout.write(\"\".format(\"\",terminal_width + ))", "docstring": "Export all dockerhub tags associated with each component given by\nthe -t flag.", "id": "f6624:c0:m35"} {"signature": "def export_params(self):", "body": "params_json = {}for p in self.processes[:]:params_json[p.template] = p.paramssys.stdout.write(json.dumps(params_json))", "docstring": 
"Export pipeline params as a JSON to stdout\n\n This run mode iterates over the pipeline processes and exports the\n params dictionary of each component as a JSON to stdout.", "id": "f6624:c0:m33"} {"signature": "def build(self):", "body": "logger.info(colored_print(\"\"\"\".format(len(self.processes[:]), len(self._fork_tree), self.lanes)))self._build_header()self._set_channels()self._set_init_process()self._set_secondary_channels()logger.info(colored_print(\"\".format(len(self.secondary_channels))))self._set_compiler_channels()self._set_configurations()logger.info(colored_print(\"\"))for p in self.processes:self.template += \"\".format(p.template_str)self._build_footer()project_root = dirname(self.nf_file)self.write_configs(project_root)with open(self.nf_file, \"\") as fh:fh.write(self.template)logger.info(colored_print(\"\".format(self.nf_file)))", "docstring": "Main pipeline builder\n\n This method is responsible for building the\n :py:attr:`NextflowGenerator.template` attribute that will contain\n the nextflow code of the pipeline.\n\n First it builds the header, then sets the main channels, the\n secondary inputs, secondary channels and finally the\n status channels. When the pipeline is built, is writes the code\n to a nextflow file.", "id": "f6624:c0:m36"} {"signature": "def dag_to_file(self, dict_viz, output_file=\"\"):", "body": "outfile_dag = open(os.path.join(dirname(self.nf_file), output_file), \"\")outfile_dag.write(json.dumps(dict_viz))outfile_dag.close()", "docstring": "Writes dag to output file\n\n Parameters\n ----------\n dict_viz: dict\n Tree like dictionary that is used to export tree data of processes\n to html file and here for the dotfile .treeDag.json", "id": "f6624:c0:m30"} {"signature": "def _check_required_files(self):", "body": "if not os.path.exists(self.trace_file):raise eh.InspectionError(\"\"\"\".format(self.trace_file))if not os.path.exists(self.log_file):raise eh.InspectionError(\"\"\"\"\"\")", "docstring": "Checks whetner the trace and log files are available", "id": "f6626:c0:m1"} {"signature": "def _cpu_load_parser(self, cpus, cpu_per, t):", "body": "try:_cpus = float(cpus)_cpu_per = float(cpu_per.replace(\"\", \"\").replace(\"\", \"\"))hours = self._hms(t) / / return ((_cpu_per / ( * _cpus)) * _cpus) * hoursexcept ValueError:return ", "docstring": "Parses the cpu load from the number of cpus and its usage\n percentage and returnsde cpu/hour measure\n\n Parameters\n ----------\n cpus : str\n Number of cpus allocated.\n cpu_per : str\n Percentage of cpu load measured (e.g.: 200,5%).\n t : str\n The time string can be something like '20s', '1m30s' or '300ms'.", "id": "f6626:c0:m15"} {"signature": "def _get_run_hash(self):", "body": "pipeline_path = get_nextflow_filepath(self.log_file)pipeline_hash = hashlib.md5()with open(pipeline_path, \"\") as fh:for chunk in iter(lambda: fh.read(), b\"\"):pipeline_hash.update(chunk)workdir = self.workdir.encode(\"\")hostname = socket.gethostname().encode(\"\")hardware_addr = str(uuid.getnode()).encode(\"\")dir_hash = hashlib.md5(workdir + hostname + hardware_addr)return pipeline_hash.hexdigest() + dir_hash.hexdigest()", "docstring": "Gets the hash of the nextflow file", "id": "f6626:c0:m37"} {"signature": "def _rightleft(self, direction):", "body": "if direction == \"\" and self.padding != :self.padding -= if direction == \"\" andself.screen.getmaxyx()[] + self.padding < self.max_width:self.padding += ", "docstring": "Provides curses horizontal padding", "id": "f6626:c0:m24"} {"signature": "def 
signal_handler(screen):", "body": "if screen:screen.clear()screen.refresh()curses.nocbreak()screen.keypad()curses.echo()curses.endwin()print(\"\")sys.exit()", "docstring": "This function is bound to the SIGINT signal (like ctrl+c) to graciously\n exit the program and reset the curses options.", "id": "f6626:m0"} {"signature": "def _updown(self, direction):", "body": "if direction == \"\" and self.top_line != :self.top_line -= elif direction == \"\" andself.screen.getmaxyx()[] + self.top_line<= self.content_lines + :self.top_line += ", "docstring": "Provides curses scroll functionality.", "id": "f6626:c0:m23"} {"signature": "def update_inspection(self):", "body": "try:self.log_parser()except (FileNotFoundError, StopIteration) as e:logger.debug(\"\" + str(sys.exc_info()[]))self.log_retry += if self.log_retry == self.MAX_RETRIES:raise etry:self.trace_parser()except (FileNotFoundError, StopIteration) as e:logger.debug(\"\" + str(sys.exc_info()[]))self.trace_retry += if self.trace_retry == self.MAX_RETRIES:raise e", "docstring": "Wrapper method that calls the appropriate main updating methods of\n the inspection.\n\n It is meant to be used inside a loop (like while), so that it can\n continuously update the class attributes from the trace and log files.\n It already implements checks to parse these files only when they\n change, and they ignore entries that have been previously processes.", "id": "f6626:c0:m20"} {"signature": "def _update_tag_status(self, process, vals):", "body": "good_status = [\"\", \"\"]for v in list(vals)[::-]:p = self.processes[process]tag = v[\"\"]if tag in p[\"\"]:p[\"\"].remove(tag)if v[\"\"] in good_status:p[\"\"].add(tag)elif v[\"\"] == \"\":if not v[\"\"]:v[\"\"] = \"\"self.process_tags[process][tag][\"\"] =self._retrieve_log(join(v[\"\"], \"\"))p[\"\"].add(tag)elif tag in p[\"\"]:if v[\"\"] in good_status:p[\"\"].remove(tag)p[\"\"].remove(tag)del self.process_tags[process][tag][\"\"]elif self.run_status == \"\":p[\"\"].remove(tag)elif v[\"\"] in good_status:p[\"\"].add(tag)if v[\"\"] not in good_status:if v[\"\"] in list(p[\"\"]) + list(p[\"\"]):vals.remove(v)return vals", "docstring": "Updates the 'submitted', 'finished', 'failed' and 'retry' status\n of each process/tag combination.\n\n Process/tag combinations provided to this method already appear on\n the trace file, so their submission status is updated based on their\n execution status from nextflow.\n\n For instance, if a tag is successfully\n complete, it is moved from the 'submitted' to the 'finished' list.\n If not, it is moved to the 'failed' list.\n\n Parameters\n ----------\n process : str\n Name of the current process. 
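The bookkeeping in ``_update_tag_status`` boils down to moving a tag between per-process ``submitted``, ``finished`` and ``failed`` sets according to the execution status reported in the trace. A stripped-down sketch of that transition, with illustrative status strings in place of the elided originals, could be::

    GOOD_STATUS = {"COMPLETED", "CACHED"}   # illustrative; the real values are elided

    def update_tag_status(state, tag, status):
        # Move `tag` into the set matching its latest trace `status`.
        state["submitted"].discard(tag)
        if status in GOOD_STATUS:
            state["failed"].discard(tag)   # a retried tag that now succeeded
            state["finished"].add(tag)
        else:
            state["failed"].add(tag)

    state = {"submitted": {"sampleA"}, "finished": set(), "failed": set()}
    update_tag_status(state, "sampleA", "COMPLETED")
    print(state)   # 'sampleA' ends up in the 'finished' set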
Must be present in attr:`processes`\n vals : list\n List of tags for this process that have been gathered in the\n trace file.", "id": "f6626:c0:m10"} {"signature": "def _get_log_lines(self, n=):", "body": "with open(self.log_file) as fh:last_lines = fh.readlines()[-n:]return last_lines", "docstring": "Returns a list with the last ``n`` lines of the nextflow log file\n\n Parameters\n ----------\n n : int\n Number of last lines from the log file\n\n Returns\n -------\n list\n List of strings with the nextflow log", "id": "f6626:c0:m30"} {"signature": "def _dag_file_to_dict(self):", "body": "try:dag_file = open(os.path.join(self.workdir, \"\"))dag_json = json.load(dag_file)except (FileNotFoundError, json.decoder.JSONDecodeError):logger.warning(colored_print(\"\",\"\"))dag_json = {}return dag_json", "docstring": "Function that opens the dotfile named .treeDag.json in the current\n working directory\n\n Returns\n -------\n Returns a dictionary with the dag object to be used in the post\n instance available through the method _establish_connection", "id": "f6626:c0:m34"} {"signature": "def _update_process_stats(self):", "body": "good_status = [\"\", \"\"]for process, vals in self.trace_info.items():vals = self._update_tag_status(process, vals)self._update_process_resources(process, vals)self.process_stats[process] = {}inst = self.process_stats[process]inst[\"\"] = \"\".format(len([x for x in vals if x[\"\"] in good_status]))try:time_array = [self._hms(x[\"\"]) for x in vals]mean_time = round(sum(time_array) / len(time_array), )mean_time_str = strftime('', gmtime(mean_time))inst[\"\"] = mean_time_strexcept KeyError:inst[\"\"] = \"\"try:cpu_hours = [self._cpu_load_parser(x[\"\"], x[\"\"], x[\"\"]) for x in vals]inst[\"\"] = round(sum(cpu_hours), )except KeyError:inst[\"\"] = \"\"inst[\"\"], inst[\"\"] =self._assess_resource_warnings(process, vals)try:rss_values = [self._size_coverter(x[\"\"]) for x in valsif x[\"\"] != \"\"]if rss_values:max_rss = round(max(rss_values))rss_str = self._size_compress(max_rss)else:rss_str = \"\"inst[\"\"] = rss_strexcept KeyError:inst[\"\"] = \"\"try:rchar_values = [self._size_coverter(x[\"\"]) for x in valsif x[\"\"] != \"\"]if rchar_values:avg_rchar = round(sum(rchar_values) / len(rchar_values))rchar_str = self._size_compress(avg_rchar)else:rchar_str = \"\"except KeyError:rchar_str = \"\"inst[\"\"] = rchar_strtry:wchar_values = [self._size_coverter(x[\"\"]) for x in valsif x[\"\"] != \"\"]if wchar_values:avg_wchar = round(sum(wchar_values) / len(wchar_values))wchar_str = self._size_compress(avg_wchar)else:wchar_str = \"\"except KeyError:wchar_str = \"\"inst[\"\"] = wchar_str", "docstring": "Updates the process stats with the information from the processes\n\n This method is called at the end of each static parsing of the nextflow\n trace file. 
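The average-time column produced by ``_update_process_stats`` depends on first converting Nextflow's human-readable durations ('20s', '1m30s', '300ms') into seconds, which is what the ``self._hms`` calls in the body above do. A standalone equivalent, written as an assumption about that helper's behaviour, is sketched below::

    import re
    from time import gmtime, strftime

    def hms_to_seconds(t):
        # Convert strings such as '20s', '1m30s' or '1h 2m 3s' to seconds.
        units = {"ms": 0.001, "s": 1, "m": 60, "h": 3600, "d": 86400}
        return sum(float(value) * units[unit]
                   for value, unit in re.findall(r"([\d.]+)\s*(ms|s|m|h|d)", t))

    durations = ["20s", "1m30s", "2m"]
    mean_time = sum(hms_to_seconds(x) for x in durations) / len(durations)
    print(strftime("%H:%M:%S", gmtime(mean_time)))   # formatted like the report column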
It re-populates the :attr:`process_stats` dictionary\n with the new stat metrics.", "id": "f6626:c0:m17"} {"signature": "def linear_lane_connection(lane_list, lane):", "body": "logger.debug(\"\".format(lane_list))res = []lane += for l in lane_list:res.extend(linear_connection(l, lane))lane += return res", "docstring": "Parameters\n----------\nlane_list : list\n Each element should correspond to a list of processes for a given lane\nlane : int\n Lane counter before the fork start\n\nReturns\n-------\nres : list\n List of dictionaries with the links between processes", "id": "f6630:m17"} {"signature": "def start_proc_insanity_check(p_string):", "body": "if FORK_TOKEN + FORK_TOKEN in p_string:raise SanityError(\"\"\"\"\"\")", "docstring": "This function checks if there is a starting process after the beginning of\neach fork. It checks for duplicated start tokens ['(('].\n\nParameters\n----------\np_string: str\n String with the definition of the pipeline, e.g.::\n 'processA processB processC(ProcessD | ProcessE)'", "id": "f6630:m8"} {"signature": "def remove_unique_identifiers(identifiers_to_tags, pipeline_links):", "body": "for index, val in enumerate(pipeline_links):if val[\"\"][\"\"] != \"\":val[\"\"][\"\"] = identifiers_to_tags[val[\"\"][\"\"]]if val[\"\"][\"\"] != \"\":val[\"\"][\"\"] = identifiers_to_tags[val[\"\"][\"\"]]return pipeline_links", "docstring": "Removes unique identifiers and add the original process names to the\n already parsed pipelines\n\n Parameters\n ----------\n identifiers_to_tags : dict\n Match between unique process identifiers and process names\n pipeline_links: list\n Parsed pipeline list with unique identifiers\n\n Returns\n -------\n list\n Pipeline list with original identifiers", "id": "f6630:m19"} {"signature": "def brackets_insanity_check(p_string):", "body": "if p_string.count(FORK_TOKEN) != p_string.count(CLOSE_TOKEN):dict_values = {FORK_TOKEN: p_string.count(FORK_TOKEN),CLOSE_TOKEN: p_string.count(CLOSE_TOKEN)}max_bracket = max(dict_values, key=dict_values.get)raise SanityError(\"\"\"\".format(str(abs(p_string.count(FORK_TOKEN) - p_string.count(CLOSE_TOKEN))),max_bracket))", "docstring": "This function performs a check for different number of '(' and ')'\ncharacters, which indicates that some forks are poorly constructed.\n\nParameters\n----------\np_string: str\n String with the definition of the pipeline, e.g.::\n 'processA processB processC(ProcessD | ProcessE)'", "id": "f6630:m4"} {"signature": "def late_proc_insanity_check(p_string):", "body": "if re.search(''.format(CLOSE_TOKEN), p_string):raise SanityError(\"\"\"\")", "docstring": "This function checks if there are processes after the close token. 
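``brackets_insanity_check`` above is a plain count comparison between fork-opening and fork-closing tokens, failing loudly when they differ. A minimal reimplementation, using literal '(' and ')' in place of the elided ``FORK_TOKEN``/``CLOSE_TOKEN`` constants, is::

    class SanityError(Exception):
        pass   # mirrors the module's sanity-check exception

    def brackets_insanity_check(p_string, fork_token="(", close_token=")"):
        opened, closed = p_string.count(fork_token), p_string.count(close_token)
        if opened != closed:
            excess = fork_token if opened > closed else close_token
            raise SanityError("Found {} extra '{}' token(s) in: '{}'".format(
                abs(opened - closed), excess, p_string))

    brackets_insanity_check("procA (procB | procC)")    # balanced, passes silently
    # brackets_insanity_check("procA (procB | procC")   # would raise SanityError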
It\nsearches for everything that isn't \"|\" or \")\" after a \")\" token.\n\nParameters\n----------\np_string: str\n String with the definition of the pipeline, e.g.::\n 'processA processB processC(ProcessD | ProcessE)'", "id": "f6630:m9"} {"signature": "def remove_inner_forks(text):", "body": "n = while n:text, n = re.subn(r'', '', text)return text", "docstring": "Recursively removes nested brackets\n\n This function is used to remove nested brackets from fork strings using\n regular expressions\n\n Parameters\n ----------\n text: str\n The string that contains brackets with inner forks to be removed\n\n Returns\n -------\n text: str\n the string with only the processes that are not in inner forks, thus\n the processes that belong to a given fork.", "id": "f6630:m1"} {"signature": "def set_param_id(self, param_id):", "body": "self._context = {**self._context, \"\": param_id}", "docstring": "Sets the param_id for the process, which will be used to render\n the template.\n\n Parameters\n ----------\n param_id : str\n The :attr:`param_id` attribute of the process.", "id": "f6631:c0:m3"} {"signature": "def update_log_watch(self):", "body": "size_stamp = os.path.getsize(self.log_file)self.trace_retry = if size_stamp and size_stamp == self.log_sizestamp:returnelse:logger.debug(\"\".format(size_stamp))self.log_sizestamp = size_stampself._update_pipeline_status()", "docstring": "Parses nextflow log file and updates the run status", "id": "f6632:c0:m7"} {"signature": "@staticmethoddef _expand_path(hash_str):", "body": "try:first_hash, second_hash = hash_str.split(\"\")first_hash_path = join(abspath(\"\"), first_hash)for l in os.listdir(first_hash_path):if l.startswith(second_hash):return join(first_hash_path, l)except FileNotFoundError:return None", "docstring": "Expands the hash string of a process (ae/1dasjdm) into a full\n working directory\n\n Parameters\n ----------\n hash_str : str\n Nextflow process hash with the beggining of the work directory\n\n Returns\n -------\n str\n Path to working directory of the hash string", "id": "f6632:c0:m3"} {"signature": "def _init_live_reports(self, report_id):", "body": "logger.debug(\"\"\"\".format(self.broadcast_address))try:with open(\"\") as fh:metadata = [json.load(fh)]except:metadata = []start_json = {\"\": {\"\": metadata}}try:requests.post(self.broadcast_address,json={\"\": report_id, \"\": start_json,\"\": self.status_info})except requests.exceptions.ConnectionError:logger.error(colored_print(\"\"\"\"\"\", \"\"))sys.exit()", "docstring": "Sends a POST request to initialize the live reports\n\n Parameters\n ----------\n report_id : str\n Hash of the report JSON as retrieved from :func:`~_get_report_hash`", "id": "f6632:c0:m9"} {"signature": "def _get_report_id(self):", "body": "if self.watch:pipeline_path = get_nextflow_filepath(self.log_file)pipeline_hash = hashlib.md5()with open(pipeline_path, \"\") as fh:for chunk in iter(lambda: fh.read(), b\"\"):pipeline_hash.update(chunk)workdir = os.getcwd().encode(\"\")hostname = socket.gethostname().encode(\"\")hardware_addr = str(uuid.getnode()).encode(\"\")dir_hash = hashlib.md5(workdir + hostname + hardware_addr)return pipeline_hash.hexdigest() + dir_hash.hexdigest()else:with open(self.report_file) as fh:report_json = json.loads(fh.read())metadata = report_json[\"\"][\"\"][][\"\"]try:report_id = metadata[\"\"] + metadata[\"\"]except KeyError:raise eh.ReportError(\"\"\"\"\"\")return report_id", "docstring": "Returns a hash of the reports JSON file", "id": "f6632:c0:m4"} {"signature": "def 
build_downstream(self, process_descriptions, task, all_tasks,task_pipeline,count_forks, total_tasks, forks):", "body": "if task in process_descriptions:if process_descriptions[task][] is not None:if len(process_descriptions[task][].split(\"\")) > :local_forks = process_descriptions[task][].split(\"\")for local_fork in local_forks:if local_fork in total_tasks:count_forks += task_pipeline.append(process_descriptions[task][])self.define_pipeline_string(process_descriptions,local_fork,False,True,count_forks,total_tasks,forks)return task_pipelineelse:if process_descriptions[task][] in total_tasks:task_pipeline.append(process_descriptions[task][].split(\"\")[])self.build_downstream(process_descriptions,process_descriptions[task][].split(\"\")[],all_tasks,task_pipeline,count_forks,total_tasks,forks)return task_pipelineelse:return task_pipeline", "docstring": "Builds the downstream pipeline of the current process\n\n Checks for the downstream processes to the current process and\n adds them to the current pipeline fragment.\n\n Parameters\n ----------\n process_descriptions : dict\n Information of processes input, output and if is forkable\n task : str\n Current process\n all_tasks : list\n A list of all provided processes\n task_pipeline : list\n Current pipeline fragment\n count_forks : int\n Current number of forks\n total_tasks : str\n All space separated processes\n forks : list\n Current forks\n Returns\n -------\n list : resulting pipeline fragment", "id": "f6651:c0:m3"} {"signature": "def list_recipes(full=False):", "body": "logger.info(colored_print(\"\",\"\"))prefix = \"\".format(recipes.__name__)for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):_module = importer.find_module(modname).load_module(modname)_recipe_classes = [cls for cls in _module.__dict__.values() ifisinstance(cls, type)]for cls in _recipe_classes:recipe_cls = cls()if hasattr(recipe_cls, \"\"):logger.info(colored_print(\"\".format(recipe_cls.name), \"\"))if full:logger.info(colored_print(\"\".format(recipe_cls.__doc__), \"\"))logger.info(colored_print(\"\".format(recipe_cls.pipeline_str), \"\"))sys.exit()", "docstring": "Method that iterates over all available recipes and prints their\n information to the standard output\n\n Parameters\n ----------\n full : bool\n If true, it will provide the pipeline string along with the recipe name", "id": "f6651:m2"} {"signature": "def define_pipeline_string(self, process_descriptions, tasks,check_upstream,check_downstream, count_forks, total_tasks,forks):", "body": "tasks_array = tasks.split()for task_unsplit in tasks_array:task = task_unsplit.split(\"\")[]if task not in process_descriptions.keys():logger.error(colored_print(\"\".format(task),\"\"))sys.exit()else:process_split = task_unsplit.split(\"\")if len(process_split) > :self.process_to_id[process_split[]] = process_split[]if not bool([x for x in forks if task in x]) and not bool([y for y in forks if process_descriptions[task][] in y]):task_pipeline = []if task in process_descriptions:if check_upstream:task_pipeline = self.build_upstream(process_descriptions,task,tasks_array,task_pipeline,count_forks,total_tasks,forks)task_pipeline.append(task)if check_downstream:task_pipeline = self.build_downstream(process_descriptions,task,tasks_array,task_pipeline,count_forks,total_tasks,forks)forks.append(list(OrderedDict.fromkeys(task_pipeline)))elif bool([y for y in forks if process_descriptions[task][] in y]):for fork in forks:if task not in fork:try:dependent_index = 
fork.index(process_descriptions[task][])fork.insert(dependent_index, task)except ValueError:continuefor i in range(, len(forks)):for j in range(, len(forks[i])):try:if len(forks[i][j].split(\"\")) > :forks[i][j] = forks[i][j].split(\"\")tmp_fork = []for s in forks[i][j]:if s in total_tasks:tmp_fork.append(s)forks[i][j] = tmp_forkexcept AttributeError as e:continuereturn forks", "docstring": "Builds the possible forks and connections between the provided\n processes\n\n This method loops through all the provided tasks and builds the\n upstream and downstream pipeline if required. It then returns all\n possible forks than need to be merged \u00e0 posteriori`\n\n Parameters\n ----------\n process_descriptions : dict\n Information of processes input, output and if is forkable\n tasks : str\n Space separated processes\n check_upstream : bool\n If is to build the upstream pipeline of the current task\n check_downstream : bool\n If is to build the downstream pipeline of the current task\n count_forks : int\n Number of current forks\n total_tasks : str\n All space separated processes\n forks : list\n Current forks\n\n Returns\n -------\n list : List with all the possible pipeline forks", "id": "f6651:c0:m4"} {"signature": "@staticmethoddef validate_pipeline(pipeline_string):", "body": "if \"\" in pipeline_string or \"\" in pipeline_string or \"\" inpipeline_string:logger.error(colored_print(\"\", \"\"))return Falsereturn True", "docstring": "Validate pipeline string\n\n Validates the pipeline string by searching for forbidden characters\n\n Parameters\n ----------\n pipeline_string : str\n STring with the processes provided\n\n Returns\n -------", "id": "f6651:c0:m1"} {"signature": "def update_config_pwd(msg, cfg):", "body": "msg_type = msg.__class__.__name__.lower()key_fmt = msg.profile + \"\" + msg_typeif isinstance(msg._auth, (MutableSequence, tuple)):cfg.pwd[key_fmt] = \"\".join(msg._auth)else:cfg.pwd[key_fmt] = msg._auth", "docstring": "Updates the profile's auth entry with values set by the user.\nThis will overwrite existing values.\n\nArgs:\n :msg: (Message class) an instance of a message class.\n :cfg: (jsonconfig.Config) config instance.", "id": "f6674:m5"} {"signature": "def verify_profile_name(msg, cfg):", "body": "if msg.profile not in cfg.data:raise UnknownProfileError(msg.profile)", "docstring": "Verifies the profile name exists in the config.json file.\n\nArgs:\n :msg: (Message class) an instance of a message class.\n :cfg: (jsonconfig.Config) config instance.", "id": "f6674:m1"} {"signature": "def configure_profile(msg_type, profile_name, data, auth):", "body": "with jsonconfig.Config(\"\", indent=) as cfg:write_data(msg_type, profile_name, data, cfg)write_auth(msg_type, profile_name, auth, cfg)print(\"\" + profile_name + \"\")print(\"\" + cfg.filename)", "docstring": "Create the profile entry.\n\nArgs:\n :msg_type: (str) message type to create config entry.\n :profile_name: (str) name of the profile entry\n :data: (dict) dict values for the 'settings'\n :auth: (dict) auth parameters", "id": "f6674:m11"} {"signature": "def _construct_message(self):", "body": "self.message[\"\"] = self.chat_idself.message[\"\"] = \"\"if self.from_:self.message[\"\"] += \"\" + self.from_ + \"\"if self.subject:self.message[\"\"] += \"\" + self.subject + \"\"self.message[\"\"] += self.bodyself.message.update(self.params)", "docstring": "Build the message params.", "id": "f6675:c0:m3"} {"signature": "def get_chat_id(self, username):", "body": "if username is not None:chats = requests.get(self.base_url 
+ \"\").json()user = username.split(\"\")[-]for chat in chats[\"\"]:if chat[\"\"][\"\"][\"\"] == user:return chat[\"\"][\"\"][\"\"]", "docstring": "Lookup chat_id of username if chat_id is unknown via API call.", "id": "f6675:c0:m2"} {"signature": "def send_async(self):", "body": "MESSAGELOOP.add_message(self)", "docstring": "Send message asynchronously.", "id": "f6675:c0:m6"} {"signature": "@abstractmethoddef send(self):", "body": "", "docstring": "Send message synchronously.", "id": "f6676:c0:m0"} {"signature": "def __repr__(self):", "body": "class_name = type(self).__name__output = \"\".format(class_name)for attr in self:if attr == \"\":output += \"\"elif attr == \"\":output += \"\".format(attr, reprlib.repr(getattr(self, attr)))else:output += \"\".format(attr, getattr(self, attr))output += \"\"return output", "docstring": "repr(self) in debugging format with auth attr obfuscated.", "id": "f6676:c0:m2"} {"signature": "def send(self):", "body": "self._generate_email()if self.verbose:print(\"\"\"\"\"\".format(timestamp()))recipients = []for i in (self.to, self.cc, self.bcc):if i:if isinstance(i, MutableSequence):recipients += ielse:recipients.append(i)session = self._get_session()if self.verbose:print(timestamp(), \"\")session.sendmail(self.from_, recipients, self.message.as_string())session.quit()if self.verbose:print(timestamp(), \"\")if self.verbose:print(timestamp(),type(self).__name__ + \"\",self.__str__(indentation=\"\"),)print(\"\")", "docstring": "Send the message.\nFirst, a message is constructed, then a session with the email\nservers is created, finally the message is sent and the session\nis stopped.", "id": "f6678:c0:m11"} {"signature": "def _get_session(self):", "body": "if self.port in (, \"\"):session = self._get_ssl()elif self.port in (, \"\"):session = self._get_tls()try:session.login(self.from_, self._auth)except SMTPResponseException as e:raise MessageSendError(e.smtp_error.decode(\"\"))return session", "docstring": "Start session with email server.", "id": "f6678:c0:m8"} {"signature": "def _get_ssl(self):", "body": "return smtplib.SMTP_SSL(self.server, self.port, context=ssl.create_default_context())", "docstring": "Get an SMTP session with SSL.", "id": "f6678:c0:m9"} {"signature": "def _add_body(self):", "body": "if self.body:b = MIMEText(\"\", \"\")b.set_payload(self.body)self.message.attach(b)", "docstring": "Add body content of email.", "id": "f6678:c0:m6"} {"signature": "def send_async(self):", "body": "MESSAGELOOP.add_message(self)", "docstring": "Send message asynchronously.", "id": "f6678:c0:m12"} {"signature": "def __str__(self, indentation=\"\"):", "body": "return (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\".format(indentation,self.server,self.port,indentation,self.from_,indentation,self.to,indentation,self.cc,indentation,self.bcc,indentation,self.subject,indentation,reprlib.repr(self.body),indentation,self.attachments,))", "docstring": "print(Email(**args)) method.\n Indentation value can be overridden in the function call.\n The default is new line", "id": "f6678:c0:m1"} {"signature": "@main.command(\"\")@click.argument(\"\", type=click.STRING, required=False)@click.argument(\"\", type=click.STRING, default=\"\", required=False)@option(\"\", \"\", \"\", help=\"\")@option(\"\", \"\", multiple=True, help=\"\")@option(\"\", \"\", help=\"\")@option(\"\",\"\",\"\",multiple=True,help=\"\",)@option(\"\", \"\", is_flag=True, help=\"\")@option(\"\",\"\",is_flag=True,help=\"\",)@click.pass_contextdef main_whatsapp(ctx, **kwds):", "body": "send_message(\"\", kwds)", 
"docstring": "Send WhatsApp text message via the Twilio API.\n\n * [PROFILE]: Pre-configured user profile.\n\n * [BODY]: Message body text.\n\n * Example: messages whatsapp myWhatsAppProfile 'hello from whatsapp' -t '+12223334444' --verbose", "id": "f6679:m10"} {"signature": "def send_async(self):", "body": "MESSAGELOOP.add_message(self)", "docstring": "Send message asynchronously.", "id": "f6681:c0:m3"} {"signature": "def send(self):", "body": "url = (\"\"+ self._auth[]+ \"\")data = {\"\": self.from_,\"\": self.to,\"\": self.body,\"\": self.attachments,}if self.verbose:print(\"\"\"\"\"\".format(timestamp()))try:resp = requests.post(url, data=data, auth=(self._auth[], self._auth[]))resp.raise_for_status()except requests.exceptions.RequestException as e:exc = \"\".format(e, resp.json()[\"\"])raise MessageSendError(exc)self.sid = resp.json()[\"\"]if self.verbose:print(timestamp(),type(self).__name__ + \"\",self.__str__(indentation=\"\"),\"\",resp.status_code,)print(\"\")return resp", "docstring": "Send the SMS/MMS message.\nSet self.sid to return code of message.", "id": "f6681:c0:m2"} {"signature": "def send(self, encoding=\"\"):", "body": "self._construct_message()if self.verbose:print(\"\"\"\"\"\".format(timestamp()))if encoding == \"\":resp = requests.post(self.url, json=self.message)elif encoding == \"\":resp = requests.post(self.url, data=self.message)try:resp.raise_for_status()if resp.history and resp.history[].status_code >= :raise MessageSendError(\"\")elif \"\" in resp.text:raise MessageSendError(\"\")except (requests.exceptions.HTTPError, MessageSendError) as e:raise MessageSendError(e)if self.verbose:print(timestamp(),type(self).__name__,\"\",self.__str__(indentation=\"\"),\"\",resp.status_code,)print(\"\")", "docstring": "Send the message via HTTP POST, default is json-encoded.", "id": "f6684:c0:m2"} {"signature": "def send_async(self):", "body": "MESSAGELOOP.add_message(self)", "docstring": "Send message asynchronously.", "id": "f6684:c0:m3"} {"signature": "def _construct_message(self):", "body": "self.message = {\"\": self._auth, \"\": self.channel}super()._construct_message()", "docstring": "Set the message token/channel, then call the bas class constructor.", "id": "f6684:c2:m2"} {"signature": "def send(msg_type, send_async=False, *args, **kwargs):", "body": "message = message_factory(msg_type, *args, **kwargs)try:if send_async:message.send_async()else:message.send()except MessageSendError as e:err_exit(\"\", e)", "docstring": "Constructs a message class and sends the message.\nDefaults to sending synchronously. Set send_async=True to send\nasynchronously.\n\nArgs:\n :msg_type: (str) the type of message to send, i.e. 'Email'\n :send_async: (bool) default is False, set True to send asynchronously.\n :kwargs: (dict) keywords arguments that are required for the\n various message types. See docstrings for each type.\n i.e. 
help(messages.Email), help(messages.Twilio), etc.\n\nExample:\n >>> kwargs = {\n from_: 'me@here.com',\n to: 'you@there.com',\n auth: 'yourPassword',\n subject: 'Email Subject',\n body: 'Your message to send',\n attachments: ['filepath1', 'filepath2'],\n }\n >>> messages.send('email', **kwargs)\n Message sent...", "id": "f6685:m0"} {"signature": "def check_valid(msg_type, attr, value, func, exec_info):", "body": "if value is not None:if isinstance(value, MutableSequence):for v in value:if not func(v):raise InvalidMessageInputError(msg_type, attr, value, exec_info)else:if not func(value):raise InvalidMessageInputError(msg_type, attr, value, exec_info)", "docstring": "Checker function all validate_* functions below will call.\nRaises InvalidMessageInputError if input is not valid as per\ngiven func.", "id": "f6686:m3"} {"signature": "def validate_slackwebhook(attr, value):", "body": "check_valid(\"\", attr, value, validus.isurl, \"\")", "docstring": "SlackWebhook input validator function.", "id": "f6686:m6"} {"signature": "def validate_whatsapp(attr, value):", "body": "if attr in (\"\", \"\"):if value is not None and \"\" in value:value = value.split(\"\")[-]check_valid(\"\",attr,value,validus.isint,\"\",)elif attr in (\"\"):check_valid(\"\", attr, value, validus.isurl, \"\")", "docstring": "WhatsApp input validator function.", "id": "f6686:m9"} {"signature": "def validate_twilio(attr, value):", "body": "if attr in (\"\", \"\"):check_valid(\"\", attr, value, validus.isphone, \"\")elif attr in (\"\"):check_valid(\"\", attr, value, validus.isurl, \"\")", "docstring": "Twilio input validator function.", "id": "f6686:m5"} {"signature": "def verbatim_tags(parser, token, endtagname):", "body": "text_and_nodes = []while :token = parser.tokens.pop()if token.contents == endtagname:breakif token.token_type == template.base.TOKEN_VAR:text_and_nodes.append('')text_and_nodes.append(token.contents)elif token.token_type == template.base.TOKEN_TEXT:text_and_nodes.append(token.contents)elif token.token_type == template.base.TOKEN_BLOCK:try:command = token.contents.split()[]except IndexError:parser.empty_block_tag(token)try:compile_func = parser.tags[command]except KeyError:parser.invalid_block_tag(token, command, None)try:node = compile_func(parser, token)except template.TemplateSyntaxError as e:if not parser.compile_function_error(token, e):raisetext_and_nodes.append(node)if token.token_type == template.base.TOKEN_VAR:text_and_nodes.append('')return text_and_nodes", "docstring": "Javascript templates (jquery, handlebars.js, mustache.js) use constructs like:\n\n::\n\n {{if condition}} print something{{/if}}\n\nThis, of course, completely screws up Django templates,\nbecause Django thinks {{ and }} means something.\n\nThe following code preserves {{ }} tokens.\n\nThis version of verbatim template tag allows you to use tags\nlike url {% url name %}. 
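A self-contained sketch of the validator pattern used by check_valid/validate_twilio above: apply a predicate to a value, or to each item when a list is given, and raise on failure. InvalidInput and is_phone are stand-ins for the library's InvalidMessageInputError and validus checks, which are not reproduced here.

from collections.abc import MutableSequence
import re

class InvalidInput(ValueError):
    """Stand-in for the library's InvalidMessageInputError."""

def is_phone(value):
    # Crude E.164-ish check, used only to make the example self-contained.
    return bool(re.fullmatch(r"\+?[0-9]{7,15}", str(value)))

def check_valid(msg_type, attr, value, func, hint):
    if value is None:
        return
    items = value if isinstance(value, MutableSequence) else [value]
    for item in items:
        if not func(item):
            raise InvalidInput(f"{msg_type}.{attr}: {item!r} is not a valid {hint}")

check_valid("twilio", "to", ["+12223334444"], is_phone, "phone number")   # passes silently
# check_valid("twilio", "to", "not-a-number", is_phone, "phone number")   # would raise InvalidInput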
{% trans \"foo\" %} or {% csrf_token %} within.", "id": "f6688:m0"} {"signature": "def _default_ns_prefix(nsmap):", "body": "if None in nsmap:default_url = nsmap[None]prefix = Nonefor key, val in nsmap.iteritems():if val == default_url and key is not None:prefix = keybreakelse:raise ValueError(\"\".format(url=default_url))return prefixraise ValueError(\"\")", "docstring": "XML doc may have several prefix:namespace_url pairs, can also specify\na namespace_url as default, tags in that namespace don't need a prefix\nNOTE:\nwe rely on default namespace also present in prefixed form, I'm not sure if\nthis is an XML certainty or a quirk of the eBay WSDLs\n\nin our case the WSDL contains:\n \n 1.0.0\n \n...but our query needs to give a prefix to the path of `Version` so we need\nto determine the default namespace of the doc, find the matching prefix and\nreturn it", "id": "f6696:m1"} {"signature": "def version_from_schema(schema_el):", "body": "vc_el = schema_elwhile True:vc_el = vc_el.getprevious()if vc_el is None:breakif vc_el.tag is etree.Comment:match = VERSION_COMMENT.search(vc_el.text)if match:try:return match.group()except IndexError:passraise VersionNotFound('')", "docstring": "returns: API version number \nraises: \n\nNOTE:\nrelies on presence of comment tags in the XSD, which are currently present\nfor both ebaySvc.xsd (TradingAPI) and ShoppingService.xsd (ShoppingAPI)", "id": "f6696:m0"} {"signature": "def make_auth_headers():", "body": "path = os.path.expanduser(\"\")if not os.path.exists(path):raise RuntimeError(\"\"\"\")with open(path) as f:token = f.read().strip()headers = {'': ''.format(token),}return headers", "docstring": "Make the authentication headers needed to use the Appveyor API.", "id": "f6700:m0"} {"signature": "def ensure_dirs(filename):", "body": "dirname = os.path.dirname(filename)if dirname and not os.path.exists(dirname):os.makedirs(dirname)", "docstring": "Make sure the directories exist for `filename`.", "id": "f6700:m2"} {"signature": "@return_errordef apply_with_return_error(args):", "body": "return args[](*args[:])", "docstring": "args is a tuple where the first argument is a callable.\n\neg::\n\n apply_with_return_error((func, 1, 2, 3)) - this will call func(1, 2, 3)", "id": "f6705:m1"} {"signature": "def debug_storage(storage, base_info=False, chars=True, runs=False):", "body": "import codecsimport localeimport sysif six.PY2:stderr = codecs.getwriter(locale.getpreferredencoding())(sys.stderr)else:stderr = sys.stderrcaller = inspect.stack()[][]stderr.write('' % caller)if base_info:stderr.write(u'' % storage[''])stderr.write(u'' % storage[''])if runs:stderr.write(u'' % list(storage['']))if chars:output = u''for _ch in storage['']:if _ch != '':output += _ch['']else:output += ''stderr.write(output + u'')output = u'' % u''.join([six.text_type(_ch['']) for _ch in storage['']])stderr.write(output)_types = [_ch[''].ljust() for _ch in storage['']]for i in range():if i:output = u''else:output = u''stderr.write(output % u''.join([_t[i] for _t in _types]))", "docstring": "Display debug information for the storage", "id": "f6710:m3"} {"signature": "def get_empty_storage():", "body": "return {'': None,'': None,'': [],'': deque(),}", "docstring": "Return an empty storage skeleton, usable for testing", "id": "f6710:m14"} {"signature": "def apply_mirroring(storage, debug):", "body": "for _ch in storage['']:unichar = _ch['']if mirrored(unichar) and_embedding_direction(_ch['']) == '':_ch[''] = MIRRORED.get(unichar, unichar)if debug:debug_storage(storage)", "docstring": "Applies 
L4: mirroring\n\n See: http://unicode.org/reports/tr9/#L4", "id": "f6710:m13"} {"signature": "def resolve_neutral_types(storage, debug):", "body": "for run in storage['']:start, length = run[''], run['']chars = [{'': run['']}] + storage[''][start:start+length] +[{'': run['']}]total_chars = len(chars)seq_start = Nonefor idx in range(total_chars):_ch = chars[idx]if _ch[''] in ('', '', '', ''):if seq_start is None:seq_start = idxprev_bidi_type = chars[idx-]['']else:if seq_start is not None:next_bidi_type = chars[idx]['']if prev_bidi_type in ('', ''):prev_bidi_type = ''if next_bidi_type in ('', ''):next_bidi_type = ''for seq_idx in range(seq_start, idx):if prev_bidi_type == next_bidi_type:chars[seq_idx][''] = prev_bidi_typeelse:chars[seq_idx][''] =_embedding_direction(chars[seq_idx][''])seq_start = Noneif debug:debug_storage(storage)", "docstring": "Resolving neutral types. Implements N1 and N2\n\n See: http://unicode.org/reports/tr9/#Resolving_Neutral_Types", "id": "f6710:m9"} {"signature": "def _encode_fields(self, xfield, yfield, time_unit=None,scale=Scale(zero=False)):", "body": "if scale is None:scale = Scale()xfieldtype = xfield[]yfieldtype = yfield[]x_options = Noneif len(xfield) > :x_options = xfield[]y_options = Noneif len(yfield) > :y_options = yfield[]if time_unit is not None:if x_options is None:xencode = X(xfieldtype, timeUnit=time_unit)else:xencode = X(xfieldtype,axis=Axis(**x_options),timeUnit=time_unit,scale=scale)else:if x_options is None:xencode = X(xfieldtype)else:xencode = X(xfieldtype,axis=Axis(**x_options),scale=scale)if y_options is None:yencode = Y(yfieldtype, scale=scale)else:yencode = Y(yfieldtype,axis=Axis(**y_options),scale=scale)return xencode, yencode", "docstring": "Encode the fields in Altair format", "id": "f6714:c0:m10"} {"signature": "def gen(self, slug, name, dataobj, xfield, yfield, time_unit=None,chart_type=\"\", width=,height=, color=Color(), size=Size(),scale=Scale(zero=False), shape=Shape(), filepath=None,html_before=\"\", html_after=\"\"):", "body": "chart_obj = self.serialize(dataobj, xfield, yfield, time_unit,chart_type, width, height, color, size, scale, shape)html = self.html(slug, name, chart_obj, filepath,html_before, html_after)return html", "docstring": "Generates an html chart from either a pandas dataframe, a dictionnary,\na list or an Altair Data object and optionally write it to a file", "id": "f6714:c0:m0"} {"signature": "def smart_account(app):", "body": "if os.environ[''] == '':returnfrom flask_security import SQLAlchemyUserDatastore, Securityaccount_module_name, account_class_name = os.environ[''].rsplit('', )account_module = importlib.import_module(account_module_name)account_class = getattr(account_module, account_class_name)role_module_name, role_class_name = os.environ[''].rsplit('', )role_module = importlib.import_module(role_module_name)role_class = getattr(role_module, role_class_name)r = True if os.environ[''] != '' else FalseSecurity(app,SQLAlchemyUserDatastore(app.db, account_class, role_class),register_blueprint=r)pass", "docstring": "\u5c1d\u8bd5\u4f7f\u7528\u5185\u7f6e\u65b9\u5f0f\u6784\u5efa\u8d26\u6237", "id": "f6727:m2"} {"signature": "def load_tasks(app, entry_file=None):", "body": "from celery import Tasktasks_txt = os.path.join(os.path.dirname(entry_file), '','')if not os.path.exists(tasks_txt):import sysprint('' % tasks_txt)sys.exit(-)class ContextTask(Task):abstract = Truedef __call__(self, *args, **kwargs):with app.app_context():return super().__call__(*args, **kwargs)app.celery.config_from_object(app.config, 
namespace='')app.celery.Task = ContextTaskwith app.app_context():with open(tasks_txt, '') as f:for line in f:mod = line.strip('')if mod:importlib.import_module(mod + '')passpasspasspass", "docstring": "\u88c5\u8f7d\u4efb\u52a1\uff0c\u89e3\u51b3celery\u65e0\u6cd5\u81ea\u52a8\u88c5\u8f7d\u7684\u95ee\u9898", "id": "f6727:m4"} {"signature": "def write_file(fobj, opts=None, data=None, eopts=None):", "body": "lines = []lines.append(\"\")if opts:lines.append(\"\" + \"\".join(opts))if eopts:lines.append(\"\" + eopts)if not data:data = [[, , ]]for row in data:lines.append(\"\".join([str(item) for item in row]))lines = [item + \"\" for item in lines]fobj.writelines(lines)", "docstring": "Write a sample Touchstone file.", "id": "f6737:m4"} {"signature": "def log(line, append=True):", "body": "with open(os.path.join(os.environ[\"\"], \"\"), \"\" if append else \"\") as fobj:fobj.write(\"\".format(line))", "docstring": "Debug xdist.", "id": "f6738:m0"} {"signature": "def to_sci_string(number):", "body": "mant, exp = peng.functions._to_eng_tuple(number)return \"\".format(mant=mant, exp_sign=\"\" if exp < else \"\", exp=abs(exp))", "docstring": "Return string with the number formatted in scientific notation.\n\nThis function does not have all the configurability of the public function\nto_scientific_string, it is a convenience function to test _to_eng_tuple", "id": "f6739:m0"} {"signature": "def full_fft():", "body": "wobj, fsample, finc = fft_wave()npoints = len(wobj.indep_vector)ret_indep_vector = barange(-fsample / , +fsample / , finc)ret_dep_vector = fft(wobj.dep_vector, npoints)return npoints, wobj, ret_indep_vector, ret_dep_vector", "docstring": "FFT of waveform where independent axis is evenly spaced and a power of 2.", "id": "f6743:m2"} {"signature": "def std_wobj(dep_name,indep_vector=None,dep_vector=None,interp=\"\",dep_units=\"\",indep_scale=\"\",):", "body": "indep_vector = np.array([, , ]) if indep_vector is None else indep_vectordep_vector = np.array([, , ]) if dep_vector is None else dep_vectorreturn peng.Waveform(indep_vector=indep_vector,dep_vector=dep_vector,dep_name=dep_name,indep_scale=indep_scale,dep_scale=\"\",indep_units=\"\",dep_units=dep_units,interp=interp,)", "docstring": "Return a waveform with fixed parameters and given name.", "id": "f6744:m1"} {"signature": "def _chunk_pars(freq_vector, data_matrix, pformat):", "body": "pformat = pformat.upper()length = for freq, data in zip(freq_vector, data_matrix):data = data.flatten()for index in range(, data.size, length):fpoint = [freq] if not index else [None]cdata = data[index : index + length]if pformat == \"\":vector1 = np.abs(cdata)vector2 = np.rad2deg(np.angle(cdata))elif pformat == \"\":vector1 = np.real(cdata)vector2 = np.imag(cdata)else: vector1 = * np.log10(np.abs(cdata))vector2 = np.rad2deg(np.angle(cdata))sep_data = np.array([])for item1, item2 in zip(vector1, vector2):sep_data = np.concatenate((sep_data, np.array([item1, item2])))ret = np.concatenate((np.array(fpoint), sep_data))yield ret", "docstring": "Chunk input data into valid Touchstone file rows.", "id": "f6747:m1"} {"signature": "@pexdoc.pcontracts.new_contract()def touchstone_noise_data(obj):", "body": "if isinstance(obj, dict) and (not obj):return Noneif (not isinstance(obj, dict)) or (isinstance(obj, dict)and (sorted(obj.keys()) != sorted([\"\", \"\", \"\", \"\", \"\"]))):raise ValueError(pexdoc.pcontracts.get_exdesc())if not (isinstance(obj[\"\"], int) and (obj[\"\"] > )):raise ValueError(pexdoc.pcontracts.get_exdesc())if 
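Rough sketch of the load_tasks idea above: read a plain-text file listing dotted module paths, one per line, and import each so that task definitions register themselves. The listing file name and the sub-module suffix appended by the original are stripped from this dump, so they are omitted here.

import importlib

def import_listed_modules(listing_path):
    loaded = []
    with open(listing_path) as handle:
        for line in handle:
            module_path = line.strip()
            if module_path:
                loaded.append(importlib.import_module(module_path))
    return loaded

# A listing file containing the single line "json" would import the stdlib
# json module.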
_check_increasing_real_numpy_vector(obj[\"\"]):raise ValueError(pexdoc.pcontracts.get_exdesc())if _check_real_numpy_vector(obj[\"\"]):raise ValueError(pexdoc.pcontracts.get_exdesc())if _check_number_numpy_vector(obj[\"\"]):raise ValueError(pexdoc.pcontracts.get_exdesc())if not (isinstance(obj[\"\"], np.ndarray)and (len(obj[\"\"].shape) == )and (obj[\"\"].shape[] > )and np.all(obj[\"\"] >= )):raise ValueError(pexdoc.pcontracts.get_exdesc())sizes = [obj[\"\"].size, obj[\"\"].size, obj[\"\"].size, obj[\"\"].size]if set(sizes) != set([obj[\"\"]]):raise ValueError(pexdoc.pcontracts.get_exdesc())return None", "docstring": "r\"\"\"\n Validate if an object is an :ref:`TouchstoneNoiseData` pseudo-type object.\n\n :param obj: Object\n :type obj: any\n\n :raises: RuntimeError (Argument \\`*[argument_name]*\\` is not valid). The\n token \\*[argument_name]\\* is replaced by the name of the argument the\n contract is attached to\n\n :rtype: None", "id": "f6748:m9"} {"signature": "@pexdoc.pcontracts.new_contract()def engineering_notation_number(obj):", "body": "try:obj = obj.rstrip()float(obj[:-] if obj[-] in _SUFFIX_TUPLE else obj)return Noneexcept (AttributeError, IndexError, ValueError):raise ValueError(pexdoc.pcontracts.get_exdesc())", "docstring": "r\"\"\"\n Validate if an object is an :ref:`EngineeringNotationNumber` pseudo-type object.\n\n :param obj: Object\n :type obj: any\n\n :raises: RuntimeError (Argument \\`*[argument_name]*\\` is not valid). The\n token \\*[argument_name]\\* is replaced by the name of the argument the\n contract is attached to\n\n :rtype: None", "id": "f6748:m3"} {"signature": "@pexdoc.pcontracts.new_contract()def touchstone_options(obj):", "body": "if (not isinstance(obj, dict)) or (isinstance(obj, dict)and (sorted(obj.keys()) != sorted([\"\", \"\", \"\", \"\"]))):raise ValueError(pexdoc.pcontracts.get_exdesc())if not ((obj[\"\"].lower() in [\"\", \"\", \"\", \"\"])and (obj[\"\"].lower() in [\"\", \"\", \"\", \"\", \"\"])and (obj[\"\"].lower() in [\"\", \"\", \"\"])and isinstance(obj[\"\"], float)and (obj[\"\"] >= )):raise ValueError(pexdoc.pcontracts.get_exdesc())", "docstring": "r\"\"\"\n Validate if an object is an :ref:`TouchstoneOptions` pseudo-type object.\n\n :param obj: Object\n :type obj: any\n\n :raises: RuntimeError (Argument \\`*[argument_name]*\\` is not valid). 
The\n token \\*[argument_name]\\* is replaced by the name of the argument the\n contract is attached to\n\n :rtype: None", "id": "f6748:m10"} {"signature": "@pexdoc.pcontracts.new_contract()def touchstone_data(obj):", "body": "if (not isinstance(obj, dict)) or (isinstance(obj, dict)and (sorted(obj.keys()) != sorted([\"\", \"\", \"\"]))):raise ValueError(pexdoc.pcontracts.get_exdesc())if not (isinstance(obj[\"\"], int) and (obj[\"\"] > )):raise ValueError(pexdoc.pcontracts.get_exdesc())if _check_increasing_real_numpy_vector(obj[\"\"]):raise ValueError(pexdoc.pcontracts.get_exdesc())if not isinstance(obj[\"\"], np.ndarray):raise ValueError(pexdoc.pcontracts.get_exdesc())vdata = [\"\", \"\", \"\"]if not any([obj[\"\"].dtype.name.startswith(item) for item in vdata]):raise ValueError(pexdoc.pcontracts.get_exdesc())if obj[\"\"].size != obj[\"\"]:raise ValueError(pexdoc.pcontracts.get_exdesc())nports = int(math.sqrt(obj[\"\"].size / obj[\"\"].size))if obj[\"\"] * (nports ** ) != obj[\"\"].size:raise ValueError(pexdoc.pcontracts.get_exdesc())", "docstring": "r\"\"\"\n Validate if an object is an :ref:`TouchstoneData` pseudo-type object.\n\n :param obj: Object\n :type obj: any\n\n :raises: RuntimeError (Argument \\`*[argument_name]*\\` is not valid). The\n token \\*[argument_name]\\* is replaced by the name of the argument the\n contract is attached to\n\n :rtype: None", "id": "f6748:m8"} {"signature": "@pexdoc.pcontracts.new_contract()def increasing_real_numpy_vector(obj):", "body": "if _check_increasing_real_numpy_vector(obj):raise ValueError(pexdoc.pcontracts.get_exdesc())", "docstring": "r\"\"\"\n Validate if an object is :ref:`IncreasingRealNumpyVector` pseudo-type object.\n\n :param obj: Object\n :type obj: any\n\n :raises: RuntimeError (Argument \\`*[argument_name]*\\` is not valid). The\n token \\*[argument_name]\\* is replaced by the name of the argument the\n contract is attached to\n\n :rtype: None", "id": "f6748:m5"} {"signature": "@pexdoc.pcontracts.new_contract()def wave_interp_option(obj):", "body": "exdesc = pexdoc.pcontracts.get_exdesc()if not isinstance(obj, str):raise ValueError(exdesc)if obj.upper() in [\"\", \"\"]:return Noneraise ValueError(exdesc)", "docstring": "r\"\"\"\n Validate if an object is a :ref:`WaveInterpOption` pseudo-type object.\n\n :param obj: Object\n :type obj: any\n\n :raises: RuntimeError (Argument \\`*[argument_name]*\\` is not valid). The\n token \\*[argument_name]\\* is replaced by the name of the argument the\n contract is attached to\n\n :rtype: None", "id": "f6748:m11"} {"signature": "@pexdoc.pcontracts.contract(suffix=\"\", offset=int)def peng_suffix_math(suffix, offset):", "body": "eobj = pexdoc.exh.addex(ValueError, \"\")try:return _POWER_TO_SUFFIX_DICT[_SUFFIX_TO_POWER_DICT[suffix] + * offset]except KeyError:eobj(True)", "docstring": "r\"\"\"\n Return engineering suffix from a starting suffix and an number of suffixes offset.\n\n :param suffix: Engineering suffix\n :type suffix: :ref:`EngineeringNotationSuffix`\n\n :param offset: Engineering suffix offset\n :type offset: integer\n\n :rtype: string\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.functions.peng_suffix_math\n\n :raises:\n * RuntimeError (Argument \\`offset\\` is not valid)\n\n * RuntimeError (Argument \\`suffix\\` is not valid)\n\n * ValueError (Argument \\`offset\\` is not valid)\n\n .. 
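The touchstone_data contract above checks that the flattened parameter array is consistent with the frequency vector. A small sketch of that shape check follows; the dictionary keys ("points", "freq", "pars") are illustrative, since the real key names are stripped in this dump.

import math
import numpy as np

def check_touchstone_shape(data):
    # For an n-port network sampled at `points` frequencies the flattened
    # parameter array must hold points * nports**2 entries.
    points, freq, pars = data["points"], data["freq"], data["pars"]
    if freq.size != points:
        raise ValueError("frequency vector length does not match points")
    nports = int(math.sqrt(pars.size / points))
    if points * (nports ** 2) != pars.size:
        raise ValueError("parameter array is not points x nports x nports")
    return nports

two_port = {
    "points": 3,
    "freq": np.array([1e9, 2e9, 3e9]),
    "pars": np.zeros(3 * 2 * 2, dtype=complex),
}
print(check_touchstone_shape(two_port))   # 2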
[[[end]]]\n\n For example:\n\n >>> import peng\n >>> peng.peng_suffix_math('u', 6)\n 'T'", "id": "f6749:m18"} {"signature": "def _to_eng_tuple(number):", "body": "split = lambda x, p: (x.ljust( + neg, \"\")[:p], x[p:].rstrip(\"\"))mant, exp = to_scientific_tuple(number)mant, neg = mant.replace(\"\", \"\"), mant.startswith(\"\")new_mant = \"\".join(filter(None, split(mant, + (exp % ) + neg)))new_exp = int( * math.floor(exp / ))return NumComp(new_mant, new_exp)", "docstring": "Return tuple with mantissa and exponent of number formatted in engineering notation.\n\n:param number: Number\n:type number: integer or float\n\n:rtype: tuple", "id": "f6749:m9"} {"signature": "def _split_every(text, sep, count, lstrip=False, rstrip=False):", "body": "ltr = \"\"[ * lstrip + rstrip].strip()func = lambda x: getattr(x, ltr + \"\")() if ltr != \"\" else xitems = text.split(sep)groups = zip_longest(*[iter(items)] * count, fillvalue=\"\")joints = (sep.join(group).rstrip(sep) for group in groups)return tuple(func(joint) for joint in joints)", "docstring": "Return list of the words in the string, using count of a separator as delimiter.\n\n:param text: String to split\n:type text: string\n\n:param sep: Separator\n:type sep: string\n\n:param count: Number of separators to use as delimiter\n:type count: integer\n\n:param lstrip: Flag that indicates whether whitespace is removed\n from the beginning of each list item (True) or not\n (False)\n:type lstrip: boolean\n\n:param rstrip: Flag that indicates whether whitespace is removed\n from the end of each list item (True) or not (False)\n:type rstrip: boolean\n\n:rtype: tuple", "id": "f6749:m8"} {"signature": "@pexdoc.pcontracts.contract(snum=\"\")def peng_float(snum):", "body": "snum = snum.rstrip()power = _SUFFIX_POWER_DICT[\"\" if snum[-].isdigit() else snum[-]]return float(snum if snum[-].isdigit() else snum[:-]) * power", "docstring": "r\"\"\"\n Return floating point equivalent of a number represented in engineering notation.\n\n :param snum: Number\n :type snum: :ref:`EngineeringNotationNumber`\n\n :rtype: string\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.functions.peng_float\n\n :raises: RuntimeError (Argument \\`snum\\` is not valid)\n\n .. [[[end]]]\n\n For example:\n\n >>> import peng\n >>> peng.peng_float(peng.peng(1235.6789E3, 3, False))\n 1236000.0", "id": "f6749:m12"} {"signature": "@pexdoc.pcontracts.contract(number=\"\")def no_exp(number):", "body": "mant, exp = to_scientific_tuple(number)if not exp:return str(number)floating_mant = \"\" in mantmant = mant.replace(\"\", \"\")if exp < :return \"\" + \"\" * (-exp - ) + mantif not floating_mant:return mant + \"\" * exp + (\"\" if isinstance(number, float) else \"\")lfpart = len(mant) - if lfpart < exp:return (mant + \"\" * (exp - lfpart)).rstrip(\"\")return mant", "docstring": "r\"\"\"\n Convert number to string guaranteeing result is not in scientific notation.\n\n :param number: Number to convert\n :type number: integer or float\n\n :rtype: string\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for peng.functions.no_exp\n\n :raises: RuntimeError (Argument \\`number\\` is not valid)\n\n .. 
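An illustrative re-implementation (not the library's code) of the suffix arithmetic documented for peng.peng_suffix_math above: each offset step moves three decades along the SI prefix ladder, so an offset of 6 from 'u' (1e-6) lands on 'T' (1e12), as in the docstring example.

_SUFFIXES = "yzafpnum kMGTPEZY"          # 10**-24 .. 10**24, ' ' meaning 10**0

def suffix_math(suffix, offset):
    powers = {s: 3 * (i - 8) for i, s in enumerate(_SUFFIXES)}
    target = powers[suffix] + 3 * offset
    for s, p in powers.items():
        if p == target:
            return s
    raise ValueError("offset falls outside the supported suffix range")

print(suffix_math("u", 6))    # 'T', matching the peng_suffix_math docstring above
print(suffix_math("k", -2))   # 'm'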
[[[end]]]", "id": "f6749:m10"} {"signature": "def _remove_extra_delims(expr, ldelim=\"\", rdelim=\"\", fcount=None):", "body": "if not expr.strip():return \"\"fcount = [] if fcount is None else fcounttfuncs = _get_functions(expr, ldelim=ldelim, rdelim=rdelim)for fdict in reversed(tfuncs):fcount[] += fdict[\"\"] = \"\" + str(fcount[])expr = expr[: fdict[\"\"]] + fdict[\"\"] + expr[fdict[\"\"] + :]fdict[\"\"] = _remove_extra_delims(fdict[\"\"], ldelim=ldelim, rdelim=rdelim, fcount=fcount)expr = _build_expr(_parse_expr(expr, ldelim=ldelim, rdelim=rdelim), ldelim=ldelim, rdelim=rdelim)for fdict in tfuncs:expr = expr.replace(fdict[\"\"], fdict[\"\"] + ldelim + fdict[\"\"] + rdelim)return expr", "docstring": "Remove unnecessary delimiters (parenthesis, brackets, etc.).\n\nInternal function that can be recursed", "id": "f6749:m7"} {"signature": "def __repr__(self):", "body": "template = (\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\")return template.format(repr(self._indep_vector),repr(self._dep_vector),repr(self._dep_name),repr(self._indep_scale),repr(self._dep_scale),repr(self._indep_units),repr(self._dep_units),repr(self._interp),)", "docstring": "r\"\"\"\n Return a string with the expression needed to re-create the object.\n\n For example:\n\n >>> import numpy as np\n >>> import peng\n >>> obj = peng.Waveform(\n ... np.array([1, 2, 3]),\n ... np.array([4, 5, 6]),\n ... 'test'\n ... )\n >>> repr(obj)\n \"peng.Waveform(indep_vector=array([1, 2, 3]), dep_vector=array([4, 5, 6]), dep_name='test', indep_scale='LINEAR', dep_scale='LINEAR', indep_units='', dep_units='', interp='CONTINUOUS')\"", "id": "f6751:c0:m31"} {"signature": "def _get_indep_vector(wave_a, wave_b):", "body": "exobj = pexdoc.exh.addex(RuntimeError, \"\")min_bound = max(np.min(wave_a.indep_vector), np.min(wave_b.indep_vector))max_bound = min(np.max(wave_a.indep_vector), np.max(wave_b.indep_vector))exobj(bool(min_bound > max_bound))raw_range = np.unique(np.concatenate((wave_a.indep_vector, wave_b.indep_vector)))return raw_range[np.logical_and(min_bound <= raw_range, raw_range <= max_bound)]", "docstring": "Create new independent variable vector.", "id": "f6751:m2"} {"signature": "def __floordiv__(self, other):", "body": "return self._operation(other, \"\")", "docstring": "Integer-divide the dependent variable vector of a waveform.\n\nThe integer division may be by the dependent variable vector of another\nwaveform, the dependent variable vector of a waveform by a number, or a\nnumber by the dependent variable vector of a waveform. In the latter\ncase a :py:class:`peng.Waveform()` object is returned with the result.\n\nFor example:\n\n >>> import numpy as np\n >>> import peng\n >>> indep_vector = np.array([1, 2, 3])\n >>> dep_vector_a = np.array([4, 5, 6])\n >>> dep_vector_b = np.array([8, 2, 4])\n >>> obj_a = peng.Waveform(indep_vector, dep_vector_a, 'obj_a')\n >>> obj_b = peng.Waveform(indep_vector, dep_vector_b, 'obj_b')\n >>> print(obj_a//obj_b)\n Waveform: obj_a//obj_b\n Independent variable: [ 1, 2, 3 ]\n Dependent variable: [ 0, 2, 1 ]\n Independent variable scale: LINEAR\n Dependent variable scale: LINEAR\n Independent variable units: (None)\n Dependent variable units: (None)\n Interpolating function: CONTINUOUS\n\n.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]\n.. Auto-generated exceptions documentation for\n.. 
peng.wave_core.Waveform.__floordiv__\n\n:raises:\n * RuntimeError (Independent variable ranges do not overlap)\n\n * RuntimeError (Waveforms are not compatible)\n\n * TypeError (Complex operands not supported)\n\n * TypeError (Data type not supported)\n\n.. [[[end]]]", "id": "f6751:c0:m10"} {"signature": "def __rfloordiv__(self, other):", "body": "return self._operation(other, \"\", reflected=True)", "docstring": "Reflected floor (integer) division.\n\nSee :py:meth:`peng.Waveform.__floordiv__` for more details", "id": "f6751:c0:m32"} {"signature": "def __rrshift__(self, other):", "body": "return self._operation(other, \"\", reflected=True)", "docstring": "Reflected right shift.\n\nSee :py:meth:`peng.Waveform.__rshift__` for more details", "id": "f6751:c0:m38"} {"signature": "def __nonzero__(self): ", "body": "return not bool(np.all(np.isclose(self._dep_vector, np.zeros(len(self._dep_vector)), FP_RTOL, FP_ATOL)))", "docstring": "Test if the waveform dependent vector is zero for all its elements.\n\nFor example:\n\n >>> import numpy as np\n >>> import peng\n >>> indep_vector = np.array([1, 2, 3])\n >>> dep_vector = np.array([4, 5, 6])\n >>> obj = peng.Waveform(indep_vector, dep_vector, 'obj_a')\n >>> if obj:\n ... print('Boolean test returned: True')\n ... else:\n ... print('Boolean test returned: False')\n Boolean test returned: True\n >>> dep_vector = np.zeros(3)\n >>> obj = peng.Waveform(indep_vector, dep_vector, 'obj_a')\n >>> if obj:\n ... print('Boolean test returned: True')\n ... else:\n ... print('Boolean test returned: False')\n Boolean test returned: False", "id": "f6751:c0:m24"} {"signature": "def __rsub__(self, other):", "body": "return self._operation(other, \"\")", "docstring": "Reflected subtraction.\n\nSee :py:meth:`peng.Waveform.__sub__` for more details", "id": "f6751:c0:m40"} {"signature": "def __iter__(self):", "body": "for iitem, ditem in zip(self._indep_vector, self._dep_vector):yield iitem, ditem", "docstring": "Return an iterable over the independent and dependent variable vectors.\n\nEach item returned by an iterator is a :py:data:`peng.Point` named\ntuple\n\n:rtype: iterable", "id": "f6751:c0:m15"} {"signature": "def __rlshift__(self, other):", "body": "return self._operation(other, \"\", reflected=True)", "docstring": "Reflected left shift.\n\nSee :py:meth:`peng.Waveform.__lshift__` for more details", "id": "f6751:c0:m33"} {"signature": "def __ne__(self, other):", "body": "return not self.__eq__(other)", "docstring": "Test waveform inequality.\n\nTwo waveforms are considered unequal if their independent variable\nvectors are different with :py:data:`peng.FP_ATOL` absolute tolerance\nand :py:data:`peng.FP_RTOL` relative tolerance, and/or their dependent\nvariable vectors are different with :py:data:`peng.FP_ATOL` absolute\ntolerance and :py:data:`peng.FP_RTOL` relative tolerance, and/or their\nindependent variable scales are not the same, and/or their dependent\nvariable scales are not the, and/or their independent variable units\nare not the same, and/or their dependent variable units are not the\nsame and/or they do not have the same interpolation function.\n\nA waveform is considered unequal to a real number when its dependent\nvariable is not equal to that number at least in one element.", "id": "f6751:c0:m22"} {"signature": "def __mul__(self, other):", "body": "return self._operation(other, \"\")", "docstring": "Multiply the dependent variable vector of a waveform.\n\nThe multiplication may be computed element-by-element using another the\ndependent variable 
vector of another waveform or a number.\n\nFor example:\n\n >>> import numpy as np\n >>> import peng\n >>> indep_vector = np.array([1, 2, 3])\n >>> dep_vector_a = np.array([4, 5, 6])\n >>> dep_vector_b = np.array([8, 2, 4])\n >>> obj_a = peng.Waveform(indep_vector, dep_vector_a, 'obj_a')\n >>> obj_b = peng.Waveform(indep_vector, dep_vector_b, 'obj_b')\n >>> print(obj_a*obj_b)\n Waveform: obj_a*obj_b\n Independent variable: [ 1, 2, 3 ]\n Dependent variable: [ 32, 10, 24 ]\n Independent variable scale: LINEAR\n Dependent variable scale: LINEAR\n Independent variable units: (None)\n Dependent variable units: (None)\n Interpolating function: CONTINUOUS\n\n.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n.. Auto-generated exceptions documentation for\n.. peng.wave_core.Waveform.__mul__\n\n:raises:\n * RuntimeError (Waveforms are not compatible)\n\n * TypeError (Data type not supported)\n\n.. [[[end]]]", "id": "f6751:c0:m21"} {"signature": "def __rmul__(self, other):", "body": "return self._operation(other, \"\")", "docstring": "Reflected multiplication.\n\nSee :py:meth:`peng.Waveform.__mul__` for more details", "id": "f6751:c0:m35"} {"signature": "def __or__(self, other):", "body": "return self._operation(other, \"\")", "docstring": "Bit-wise logic or the dependent variable of a waveform.\n\nThe other operand may be the dependent variable vector of another\nwaveform or a number.\n\nFor example:\n\n >>> import numpy as np\n >>> import peng\n >>> indep_vector = np.array([1, 2, 3])\n >>> dep_vector_a = np.array([4, 5, 6])\n >>> dep_vector_b = np.array([8, 2, 4])\n >>> obj_a = peng.Waveform(indep_vector, dep_vector_a, 'obj_a')\n >>> obj_b = peng.Waveform(indep_vector, dep_vector_b, 'obj_b')\n >>> print(obj_a|obj_b)\n Waveform: obj_a|obj_b\n Independent variable: [ 1, 2, 3 ]\n Dependent variable: [ 12, 7, 6 ]\n Independent variable scale: LINEAR\n Dependent variable scale: LINEAR\n Independent variable units: (None)\n Dependent variable units: (None)\n Interpolating function: CONTINUOUS\n\n.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]\n.. Auto-generated exceptions documentation for\n.. peng.wave_core.Waveform.__or__\n\n:raises:\n * RuntimeError (Independent variable ranges do not overlap)\n\n * RuntimeError (Waveforms are not compatible)\n\n * TypeError (Complex operands not supported)\n\n * TypeError (Data type not supported)\n\n.. 
[[[end]]]", "id": "f6751:c0:m25"} {"signature": "def __rmod__(self, other):", "body": "return self._operation(other, \"\", reflected=True)", "docstring": "Reflected division.\n\nSee :py:meth:`peng.Waveform.__mod__` for more details", "id": "f6751:c0:m34"} {"signature": "def __contains__(self, item):", "body": "if (not isinstance(item, tuple)) or (isinstance(item, tuple) and (len(item) != )):return Falsetchk = any([isinstance(item[], typ) for typ in [int, float]])if not tchk:return Falsetchk = any([isinstance(item[], typ) for typ in [int, float, complex]])if not tchk:return Falseindices = [np.where(self._indep_vector >= item[])[][]]if indices[]:indices.append(indices[] - )for index in indices:icmp = np.isclose(np.array([self._indep_vector[index]]),np.array([item[]]),FP_RTOL,FP_ATOL,)dcmp = np.isclose(np.array([self._dep_vector[index]]),np.array([item[]]),FP_RTOL,FP_ATOL,)if icmp and dcmp:return Truereturn False", "docstring": "Test if an item is a data point in the waveform.\n\nAn item is in a waveform when it is a tuple with the characteristics of\nthe :py:data:`peng.Point` named tuple and its independent and dependent\nvalues match a waveform's data point with :py:data:`peng.FP_ATOL`\nabsolute tolerance and :py:data:`peng.FP_RTOL` relative tolerance\n\n:param item: Object\n:type item: any", "id": "f6751:c0:m5"} {"signature": "def __le__(self, other):", "body": "return self._operation(other, \"\")", "docstring": "Test whether waveform is less than or equal to another waveform or real number.\n\nA waveform is less than or equal to another waveform or a number if\nall elements of its dependent variable vector are less than or equal\nto, element-by-element, the elements of the dependent variable vector\nof another waveform or a real number with :py:data:`peng.FP_ATOL`\nabsolute tolerance and :py:data:`peng.FP_RTOL` relative tolerance\n\n.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]\n.. Auto-generated exceptions documentation for\n.. peng.wave_core.Waveform.__le__\n\n:raises:\n * RuntimeError (Independent variable ranges do not overlap)\n\n * RuntimeError (Waveforms are not compatible)\n\n * TypeError (Complex operands not supported)\n\n * TypeError (Data type not supported)\n\n.. [[[end]]]", "id": "f6751:c0:m16"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def cos(wave):", "body": "return _operation(wave, \"\", \"\", np.cos)", "docstring": "r\"\"\"\n Return the cosine of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for peng.wave_functions.cos\n\n :raises: RuntimeError (Argument \\`wave\\` is not valid)\n\n .. [[[end]]]", "id": "f6752:m14"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def asin(wave):", "body": "pexdoc.exh.addex(ValueError,\"\",bool((min(wave._dep_vector) < -) or (max(wave._dep_vector) > )),)return _operation(wave, \"\", \"\", np.arcsin)", "docstring": "r\"\"\"\n Return the arc sine of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.asin\n\n :raises:\n * RuntimeError (Argument \\`wave\\` is not valid)\n\n * ValueError (Math domain error)\n\n .. 
[[[end]]]", "id": "f6752:m8"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def wfloat(wave):", "body": "pexdoc.exh.addex(TypeError,\"\",wave._dep_vector.dtype.name.startswith(\"\"),)ret = copy.copy(wave)ret._dep_vector = ret._dep_vector.astype(np.float)return ret", "docstring": "r\"\"\"\n Convert a waveform's dependent variable vector to float.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.wfloat\n\n :raises:\n * RuntimeError (Argument \\`wave\\` is not valid)\n\n * TypeError (Cannot convert complex to float)\n\n .. [[[end]]]", "id": "f6752:m52"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform, indep_min=\"\", indep_max=\"\")def average(wave, indep_min=None, indep_max=None):", "body": "ret = copy.copy(wave)_bound_waveform(ret, indep_min, indep_max)area = _running_area(ret._indep_vector, ret._dep_vector)area[] = ret._dep_vector[]deltas = ret._indep_vector - ret._indep_vector[]deltas[] = ret._dep_vector = np.divide(area, deltas)ret.dep_name = \"\".format(ret._dep_name)return ret", "docstring": "r\"\"\"\n Return the running average of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :param indep_min: Independent vector start point of computation\n :type indep_min: integer or float\n\n :param indep_max: Independent vector stop point of computation\n :type indep_max: integer or float\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.average\n\n :raises:\n * RuntimeError (Argument \\`indep_max\\` is not valid)\n\n * RuntimeError (Argument \\`indep_min\\` is not valid)\n\n * RuntimeError (Argument \\`wave\\` is not valid)\n\n * RuntimeError (Incongruent \\`indep_min\\` and \\`indep_max\\`\n arguments)\n\n .. [[[end]]]", "id": "f6752:m12"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform,npoints=\"\",indep_min=\"\",indep_max=\"\",unwrap=bool,rad=bool,)def ifftp(wave, npoints=None, indep_min=None, indep_max=None, unwrap=True, rad=True):", "body": "return phase(ifft(wave, npoints, indep_min, indep_max), unwrap=unwrap, rad=rad)", "docstring": "r\"\"\"\n Return the phase of the inverse Fast Fourier Transform of a waveform.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :param npoints: Number of points to use in the transform. If **npoints**\n is less than the size of the independent variable vector\n the waveform is truncated; if **npoints** is greater than\n the size of the independent variable vector, the waveform\n is zero-padded\n :type npoints: positive integer\n\n :param indep_min: Independent vector start point of computation\n :type indep_min: integer or float\n\n :param indep_max: Independent vector stop point of computation\n :type indep_max: integer or float\n\n :param unwrap: Flag that indicates whether phase should change phase shifts\n to their :code:`2*pi` complement (True) or not (False)\n :type unwrap: boolean\n\n :param rad: Flag that indicates whether phase should be returned in radians\n (True) or degrees (False)\n :type rad: boolean\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]\n .. Auto-generated exceptions documentation for\n .. 
peng.wave_functions.ifftp\n\n :raises:\n * RuntimeError (Argument \\`indep_max\\` is not valid)\n\n * RuntimeError (Argument \\`indep_min\\` is not valid)\n\n * RuntimeError (Argument \\`npoints\\` is not valid)\n\n * RuntimeError (Argument \\`rad\\` is not valid)\n\n * RuntimeError (Argument \\`unwrap\\` is not valid)\n\n * RuntimeError (Argument \\`wave\\` is not valid)\n\n * RuntimeError (Incongruent \\`indep_min\\` and \\`indep_max\\`\n arguments)\n\n * RuntimeError (Non-uniform frequency spacing)\n\n .. [[[end]]]", "id": "f6752:m31"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def sinh(wave):", "body": "return _operation(wave, \"\", \"\", np.sinh)", "docstring": "r\"\"\"\n Return the hyperbolic sine of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.sinh\n\n :raises: RuntimeError (Argument \\`wave\\` is not valid)\n\n .. [[[end]]]", "id": "f6752:m46"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def floor(wave):", "body": "return _operation(wave, \"\", wave.dep_units, np.floor)", "docstring": "r\"\"\"\n Return the floor of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.floor\n\n :raises: RuntimeError (Argument \\`wave\\` is not valid)\n\n .. [[[end]]]", "id": "f6752:m26"} {"signature": "def _operation(wave, desc, units, fpointer):", "body": "ret = copy.copy(wave)ret.dep_units = unitsret.dep_name = \"\".format(desc, ret.dep_name)ret._dep_vector = fpointer(ret._dep_vector)return ret", "docstring": "Perform generic operation on a waveform object.", "id": "f6752:m3"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform,dep_var=\"\",der=\"\",inst=\"\",indep_min=\"\",indep_max=\"\",)def find(wave, dep_var, der=None, inst=, indep_min=None, indep_max=None):", "body": "ret = copy.copy(wave)_bound_waveform(ret, indep_min, indep_max)close_min = np.isclose(min(ret._dep_vector), dep_var, FP_RTOL, FP_ATOL)close_max = np.isclose(max(ret._dep_vector), dep_var, FP_RTOL, FP_ATOL)if ((np.amin(ret._dep_vector) > dep_var) and (not close_min)) or ((np.amax(ret._dep_vector) < dep_var) and (not close_max)):return Nonecross_wave = ret._dep_vector - dep_varsign_wave = np.sign(cross_wave)exact_idx = np.where(np.isclose(ret._dep_vector, dep_var, FP_RTOL, FP_ATOL))[]left_idx = np.where(np.diff(sign_wave))[]left_idx = np.setdiff1d(left_idx, exact_idx)left_idx = np.setdiff1d(left_idx, exact_idx - )right_idx = left_idx + if left_idx.size else np.array([])indep_var = ret._indep_vector[exact_idx] if exact_idx.size else np.array([])dvector = np.zeros(exact_idx.size).astype(int) if exact_idx.size else np.array([])if left_idx.size and (ret.interp == \"\"):idvector = ( * (ret._dep_vector[right_idx] > ret._dep_vector[left_idx]).astype(int)- )if indep_var.size:indep_var = np.concatenate((indep_var, ret._indep_vector[right_idx]))dvector = np.concatenate((dvector, idvector))sidx = np.argsort(indep_var)indep_var = indep_var[sidx]dvector = dvector[sidx]else:indep_var = ret._indep_vector[right_idx]dvector = idvectorelif left_idx.size:y_left = ret._dep_vector[left_idx]y_right = ret._dep_vector[right_idx]x_left = 
ret._indep_vector[left_idx]x_right = ret._indep_vector[right_idx]slope = ((y_left - y_right) / (x_left - x_right)).astype(float)if indep_var.size:indep_var = np.concatenate((indep_var, x_left + ((dep_var - y_left) / slope)))dvector = np.concatenate((dvector, np.where(slope > , , -)))sidx = np.argsort(indep_var)indep_var = indep_var[sidx]dvector = dvector[sidx]else:indep_var = x_left + ((dep_var - y_left) / slope)dvector = np.where(slope > , +, -)if der is not None:indep_var = np.extract(dvector == der, indep_var)return indep_var[inst - ] if inst <= indep_var.size else None", "docstring": "r\"\"\"\n Return the independent variable point associated with a dependent variable point.\n\n If the dependent variable point is not in the dependent variable vector the\n independent variable vector point is obtained by linear interpolation\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :param dep_var: Dependent vector value to search for\n :type dep_var: integer, float or complex\n\n :param der: Dependent vector derivative filter. If +1 only independent\n vector points that have positive derivatives when crossing\n the requested dependent vector point are returned; if -1 only\n independent vector points that have negative derivatives when\n crossing the requested dependent vector point are returned;\n if 0 only independent vector points that have null derivatives\n when crossing the requested dependent vector point are\n returned; otherwise if None all independent vector points are\n returned regardless of the dependent vector derivative. The\n derivative of the first and last point of the waveform is\n assumed to be null\n :type der: integer, float or complex\n\n :param inst: Instance number filter. If, for example, **inst** equals 3,\n then the independent variable vector point at which the\n dependent variable vector equals the requested value for the\n third time is returned\n :type inst: positive integer\n\n :param indep_min: Independent vector start point of computation\n :type indep_min: integer or float\n\n :param indep_max: Independent vector stop point of computation\n :type indep_max: integer or float\n\n :rtype: integer, float or None if the dependent variable point is not found\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.find\n\n :raises:\n * RuntimeError (Argument \\`dep_var\\` is not valid)\n\n * RuntimeError (Argument \\`der\\` is not valid)\n\n * RuntimeError (Argument \\`indep_max\\` is not valid)\n\n * RuntimeError (Argument \\`indep_min\\` is not valid)\n\n * RuntimeError (Argument \\`inst\\` is not valid)\n\n * RuntimeError (Argument \\`wave\\` is not valid)\n\n * RuntimeError (Incongruent \\`indep_min\\` and \\`indep_max\\`\n arguments)\n\n .. [[[end]]]", "id": "f6752:m25"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform, indep_min=\"\", indep_max=\"\")def nmin(wave, indep_min=None, indep_max=None):", "body": "ret = copy.copy(wave)_bound_waveform(ret, indep_min, indep_max)return np.min(ret._dep_vector)", "docstring": "r\"\"\"\n Return the minimum of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :param indep_min: Independent vector start point of computation\n :type indep_min: integer or float\n\n :param indep_max: Independent vector stop point of computation\n :type indep_max: integer or float\n\n :rtype: float\n\n .. 
[[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.nmin\n\n :raises:\n * RuntimeError (Argument \\`indep_max\\` is not valid)\n\n * RuntimeError (Argument \\`indep_min\\` is not valid)\n\n * RuntimeError (Argument \\`wave\\` is not valid)\n\n * RuntimeError (Incongruent \\`indep_min\\` and \\`indep_max\\`\n arguments)\n\n .. [[[end]]]", "id": "f6752:m41"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def asinh(wave):", "body": "return _operation(wave, \"\", \"\", np.arcsinh)", "docstring": "r\"\"\"\n Return the hyperbolic arc sine of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.asinh\n\n :raises: RuntimeError (Argument \\`wave\\` is not valid)\n\n .. [[[end]]]", "id": "f6752:m9"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def sin(wave):", "body": "return _operation(wave, \"\", \"\", np.sin)", "docstring": "r\"\"\"\n Return the sine of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for peng.wave_functions.sin\n\n :raises: RuntimeError (Argument \\`wave\\` is not valid)\n\n .. [[[end]]]", "id": "f6752:m45"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def cosh(wave):", "body": "return _operation(wave, \"\", \"\", np.cosh)", "docstring": "r\"\"\"\n Return the hyperbolic cosine of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.cosh\n\n :raises: RuntimeError (Argument \\`wave\\` is not valid)\n\n .. [[[end]]]", "id": "f6752:m15"} {"signature": "@pexdoc.pcontracts.contract(wave=Waveform)def real(wave):", "body": "return _operation(wave, \"\", wave.dep_units, np.real)", "docstring": "r\"\"\"\n Return the real part of a waveform's dependent variable vector.\n\n :param wave: Waveform\n :type wave: :py:class:`peng.eng.Waveform`\n\n :rtype: :py:class:`peng.eng.Waveform`\n\n .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]\n .. Auto-generated exceptions documentation for\n .. peng.wave_functions.real\n\n :raises: RuntimeError (Argument \\`wave\\` is not valid)\n\n .. 
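A simplified sketch of the crossing search that find() above performs: locate the independent-variable point where a sampled signal crosses a target level, using linear interpolation between bracketing samples. The derivative filter and bounds handled by the real function are omitted.

import numpy as np

def find_crossing(x, y, level, inst=1):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    hits = []
    for i in range(len(x) - 1):
        y0, y1 = y[i] - level, y[i + 1] - level
        if y0 == 0.0:
            hits.append(x[i])
        elif y0 * y1 < 0.0:                       # sign change => one crossing
            hits.append(x[i] + (x[i + 1] - x[i]) * (-y0) / (y1 - y0))
    if y[-1] == level:
        hits.append(x[-1])
    return hits[inst - 1] if inst <= len(hits) else None

t = [0.0, 1.0, 2.0, 3.0]
v = [0.0, 2.0, 0.5, 3.0]
print(find_crossing(t, v, 1.0))           # 0.5   (first, upward crossing)
print(find_crossing(t, v, 1.0, inst=2))   # 1.666... (second, downward crossing)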
[[[end]]]", "id": "f6752:m43"} {"signature": "def trace_module(no_print=True):", "body": "mname = \"\"fname = \"\"module_prefix = \"\".format(mname)callable_names = (\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",)return docs.support.trace_support.run_trace(mname, fname, module_prefix, callable_names, no_print)", "docstring": "Trace eng wave module exceptions.", "id": "f6756:m0"} {"signature": "def ste(command, nindent, mdir, fpointer):", "body": "term_echo(\"\".format(sep=os.path.sep, cmd=command),nindent,{\"\": mdir},fpointer,)", "docstring": "r\"\"\"\n Echo terminal output.\n\n Print STDOUT resulting from a given Bash shell command (relative to the\n package :code:`pypkg` directory) formatted in reStructuredText\n\n :param command: Bash shell command, relative to\n :bash:`${PMISC_DIR}/pypkg`\n :type command: string\n\n :param nindent: Indentation level\n :type nindent: integer\n\n :param mdir: Module directory\n :type mdir: string\n\n :param fpointer: Output function pointer. Normally is :code:`cog.out` but\n :code:`print` or other functions can be used for\n debugging\n :type fpointer: function object\n\n For example::\n\n .. This is a reStructuredText file snippet\n .. [[[cog\n .. import os, sys\n .. from docs.support.term_echo import term_echo\n .. file_name = sys.modules['docs.support.term_echo'].__file__\n .. mdir = os.path.realpath(\n .. os.path.dirname(\n .. os.path.dirname(os.path.dirname(file_name))\n .. )\n .. )\n .. [[[cog ste('build_docs.py -h', 0, mdir, cog.out) ]]]\n\n .. code-block:: bash\n\n $ ${PMISC_DIR}/pypkg/build_docs.py -h\n usage: build_docs.py [-h] [-d DIRECTORY] [-n NUM_CPUS]\n ...\n\n .. ]]]", "id": "f6758:m0"} {"signature": "def term_echo(command, nindent=, env=None, fpointer=None, cols=):", "body": "os.environ[\"\"] = str(cols)command_int = commandif env:for var, repl in env.items():command_int = command_int.replace(\"\" + var + \"\", repl)tokens = command_int.split(\"\")if (platform.system().lower() == \"\") and (tokens[].endswith(\"\")):tokens = [sys.executable] + tokensproc = subprocess.Popen(tokens, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)stdout = proc.communicate()[]if sys.hexversion >= :stdout = stdout.decode(\"\")stdout = stdout.split(\"\")indent = nindent * \"\"fpointer(\"\", dedent=False)fpointer(\"\".format(indent), dedent=False)fpointer(\"\", dedent=False)fpointer(\"\".format(indent, command), dedent=False)for line in stdout:if line.strip():fpointer(indent + \"\" + line.replace(\"\", \"\") + \"\", dedent=False)else:fpointer(\"\", dedent=False)fpointer(\"\", dedent=False)", "docstring": "Print STDOUT resulting from a Bash shell command formatted in reStructuredText.\n\n:param command: Bash shell command\n:type command: string\n\n:param nindent: Indentation level\n:type nindent: integer\n\n:param env: Environment variable replacement dictionary. The Bash\n command is pre-processed and any environment variable\n represented in the full notation (:bash:`${...}`) is replaced.\n The dictionary key is the environment variable name and the\n dictionary value is the replacement value. For example, if\n **command** is :code:`'${PYTHON_CMD} -m \"x=5\"'` and **env**\n is :code:`{'PYTHON_CMD':'python3'}` the actual command issued\n is :code:`'python3 -m \"x=5\"'`\n:type env: dictionary\n\n:param fpointer: Output function pointer. 
Normally is :code:`cog.out` but\n :code:`print` or other functions can be used for\n debugging\n:type fpointer: function object\n\n:param cols: Number of columns of output\n:type cols: integer", "id": "f6758:m1"} {"signature": "@pexdoc.pcontracts.new_contract()def touchstone_data(obj):", "body": "if (not isinstance(obj, dict)) or (isinstance(obj, dict)and (sorted(obj.keys()) != sorted([\"\", \"\", \"\"]))):raise ValueError(pexdoc.pcontracts.get_exdesc())if not (isinstance(obj[\"\"], int) and (obj[\"\"] > )):raise ValueError(pexdoc.pcontracts.get_exdesc())if _check_increasing_real_numpy_vector(obj[\"\"]):raise ValueError(pexdoc.pcontracts.get_exdesc())if not isinstance(obj[\"\"], np.ndarray):raise ValueError(pexdoc.pcontracts.get_exdesc())vdata = [\"\", \"\", \"\"]if not any([obj[\"\"].dtype.name.startswith(item) for item in vdata]):raise ValueError(pexdoc.pcontracts.get_exdesc())if obj[\"\"].size != obj[\"\"]:raise ValueError(pexdoc.pcontracts.get_exdesc())nports = int(math.sqrt(obj[\"\"].size / obj[\"\"].size))if obj[\"\"] * (nports ** ) != obj[\"\"].size:raise ValueError(pexdoc.pcontracts.get_exdesc())", "docstring": "r\"\"\"\n Validate if an object is an :ref:`TouchstoneData` pseudo-type object.\n\n :param obj: Object\n :type obj: any\n\n :raises: RuntimeError (Argument \\`*[argument_name]*\\` is not valid). The\n token \\*[argument_name]\\* is replaced by the name of the argument the\n contract is attached to\n\n :rtype: None", "id": "f6759:m8"} {"signature": "def def_links(mobj):", "body": "fdict = json_load(os.path.join(\"\", \"\"))sdeps = sorted(fdict.keys())olines = []for item in sdeps:olines.append(\"\".format(name=fdict[item][\"\"], url=fdict[item][\"\"]))ret = []for line in olines:wobj = textwrap.wrap(line, width=LINE_WIDTH, subsequent_indent=\"\")ret.append(\"\".join([item for item in wobj]))mobj.out(\"\".join(ret))", "docstring": "Define Sphinx requirements links.", "id": "f6760:m0"} {"signature": "def ops_to_words(item):", "body": "unsupp_ops = [\"\", \"\"]supp_ops = [\"\", \">\", \"\", \"\", \"\", \"\"]tokens = sorted(item.split(\"\"), reverse=True)actual_tokens = []for req in tokens:for op in unsupp_ops:if req.startswith(op):raise RuntimeError(\"\".format(op))for op in supp_ops:if req.startswith(op):actual_tokens.append(op)breakelse:raise RuntimeError(\"\".format(op))if len(list(set(actual_tokens))) != len(actual_tokens):raise RuntimeError(\"\")if \"\" in actual_tokens:return (\"\".join([op_to_words(token) for token in tokens[:-]])+ \"\"+ op_to_words(tokens[-]))return \"\".join([op_to_words(token) for token in tokens])", "docstring": "Translate requirement specification to words.", "id": "f6760:m4"} {"signature": "def make_multi_entry(plist, pkg_pyvers, ver_dict):", "body": "for pyver in pkg_pyvers:pver = pyver[] + \"\" + pyver[:]plist.append(\"\".format(pver, ops_to_words(ver_dict[pyver])))", "docstring": "Generate Python interpreter version entries.", "id": "f6760:m2"} {"signature": "def op_to_words(item):", "body": "sdicts = [{\"\": \"\"},{\"\": \"\"},{\">\": \"\"},{\"\": \"\"},{\"\": \"\"},{\"\": \"\"},]for sdict in sdicts:prefix = list(sdict.keys())[]suffix = sdict[prefix]if item.startswith(prefix):if prefix == \"\":return item[:]if prefix == \"\":return suffix + item[:]if prefix in [\">\", \"\"]:return suffix + item[:]return item[:] + suffixraise RuntimeError(\"\")", "docstring": "Translate >=, ==, <= to words.", "id": "f6760:m3"} {"signature": "def proc_requirements(mobj):", "body": "pyvers = [\"\".format(item.replace(\"\", \"\")) for item in 
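Rough sketch of what term_echo above does: run a shell command, capture its output, and emit it as an indented reStructuredText code block. The real helper also handles ${VAR} substitution, Windows .py dispatch, and a configurable column width, none of which are reproduced here.

import subprocess

def echo_command(command, nindent=0, out=print):
    indent = " " * nindent
    proc = subprocess.run(command.split(), capture_output=True, text=True, check=False)
    out(f"{indent}.. code-block:: bash")
    out("")
    out(f"{indent}    $ {command}")
    for line in proc.stdout.splitlines():
        out(f"{indent}    {line}" if line.strip() else "")

# echo_command("python --version", nindent=4)   # assumes "python" is on PATH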
get_supported_interps()]py2vers = sorted([item for item in pyvers if item.startswith(\"\")])py3vers = sorted([item for item in pyvers if item.startswith(\"\")])fdict = json_load(os.path.join(\"\", \"\"))olines = [\"\"]sdict = dict([(item[\"\"], item) for item in fdict.values()])for real_name in sorted(sdict.keys()):pkg_dict = sdict[real_name]if pkg_dict[\"\"] == [\"\"]:continueplist = [] if not pkg_dict[\"\"] else [\"\"]if isinstance(pkg_dict[\"\"], str):pkg_dict[\"\"] = dict([(pyver, pkg_dict[\"\"]) for pyver in pyvers])pkg_pyvers = sorted(pkg_dict[\"\"].keys())pkg_py2vers = sorted([item for item in pkg_dict[\"\"].keys() if item.startswith(\"\")])req_vers = list(set(pkg_dict[\"\"].values()))req_py2vers = list(set([pkg_dict[\"\"][item] for item in py2vers if item in pkg_dict[\"\"]]))req_py3vers = list(set([pkg_dict[\"\"][item] for item in py3vers if item in pkg_dict[\"\"]]))if (len(req_vers) == ) and (pkg_pyvers == pyvers):plist.append(ops_to_words(req_vers[]))elif ((pkg_pyvers == pyvers)and (len(req_py2vers) == )and (len(req_py3vers) == )):make_common_entry(plist, \"\", \"\", req_py2vers[])make_common_entry(plist, \"\", \"\", req_py3vers[])elif ((pkg_pyvers == pyvers)and (len(req_py2vers) == len(py2vers))and (len(req_py3vers) == )and (pkg_dict[\"\"][pkg_py2vers[-]] == req_py3vers[])):py2dict = dict([(key, value)for key, value in pkg_dict[\"\"].items()if key.startswith(\"\") and (key != pkg_py2vers[-])])make_multi_entry(plist, py2vers[:-], py2dict)pver = pkg_py2vers[-][] + \"\" + pkg_py2vers[-][:]plist.append(\"\".format(pyver=pver, ver=ops_to_words(req_py3vers[])))elif ((pkg_pyvers == pyvers)and (len(req_py2vers) == len(py2vers))and (len(req_py3vers) == )):py2dict = dict([(key, value)for key, value in pkg_dict[\"\"].items()if key.startswith(\"\")])make_multi_entry(plist, py2vers, py2dict)make_common_entry(plist, \"\", \"\", req_py3vers[])elif ((pkg_pyvers == pyvers)and (len(req_py3vers) == len(py3vers))and (len(req_py2vers) == )):py3dict = dict([(key, value)for key, value in pkg_dict[\"\"].items()if key.startswith(\"\")])make_common_entry(plist, \"\", \"\", req_py2vers[])make_multi_entry(plist, py3vers, py3dict)elif (len(req_vers) == ) and (pkg_pyvers == py2vers):make_common_entry(plist, \"\", \"\", req_vers[])elif (len(req_vers) == ) and (pkg_pyvers == py3vers):make_common_entry(plist, \"\", \"\", req_vers[])else:make_multi_entry(plist, pkg_pyvers, pkg_dict[\"\"])olines.append(\"\".format(name=pkg_dict[\"\"], par=\"\".join(plist)))ret = []for line in olines:wobj = textwrap.wrap(line, width=LINE_WIDTH, subsequent_indent=\"\")ret.append(\"\".join([item for item in wobj]))mobj.out(\"\".join(ret) + \"\")", "docstring": "Get requirements in reStructuredText format.", "id": "f6760:m5"} {"signature": "def trace_module(no_print=True):", "body": "mname = \"\"fname = \"\"module_prefix = \"\".format(mname)callable_names = (\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",)return docs.support.trace_support.run_trace(mname, fname, module_prefix, callable_names, no_print)", "docstring": "Trace eng functions module exceptions.", "id": "f6761:m0"} {"signature": "def incfile(fname, fpointer, lrange=\"\", sdir=None):", "body": "file_dir = (sdirif sdirelse os.environ.get(\"\", os.path.abspath(os.path.dirname(__file__))))fname = os.path.join(file_dir, fname)with open(fname) as fobj:lines = fobj.readlines()tokens = [item.strip() for item in lrange.split(\"\")]inc_lines = []for token in tokens:if \"\" in token:subtokens = token.split(\"\")lmin, lmax = (int(subtokens[]),int(subtokens[]) if subtokens[] else 
len(lines),)for num in range(lmin, lmax + ):inc_lines.append(num)else:inc_lines.append(int(token))fpointer(\"\")fpointer(\"\")for num, line in enumerate(lines):if num + in inc_lines:fpointer(\"\" + line.replace(\"\", \"\") if line.strip() else \"\")fpointer(\"\")", "docstring": "r\"\"\"\n Include a Python source file in a docstring formatted in reStructuredText.\n\n :param fname: File name, relative to environment variable\n :bash:`${TRACER_DIR}`\n :type fname: string\n\n :param fpointer: Output function pointer. Normally is :code:`cog.out` but\n :code:`print` or other functions can be used for\n debugging\n :type fpointer: function object\n\n :param lrange: Line range to include, similar to Sphinx\n `literalinclude `_ directive\n :type lrange: string\n\n :param sdir: Source file directory. If None the :bash:`${TRACER_DIR}`\n environment variable is used if it is defined, otherwise\n the directory where the :code:`docs.support.incfile` module\n is located is used\n :type sdir: string\n\n For example:\n\n .. code-block:: python\n\n def func():\n \\\"\\\"\\\"\n This is a docstring. This file shows how to use it:\n\n .. =[=cog\n .. import docs.support.incfile\n .. docs.support.incfile.incfile('func_example.py', cog.out)\n .. =]=\n .. code-block:: python\n\n # func_example.py\n if __name__ == '__main__':\n func()\n\n .. =[=end=]=\n \\\"\\\"\\\"\n return 'This is func output'", "id": "f6763:m0"} {"signature": "def filter_glyph_names( alist, filter ):", "body": "count = extras = []for name in alist:try:filtered_index = filter.index( name )except:extras.append( name )return extras", "docstring": "filter `alist' by taking _out_ all glyph names that are in `filter", "id": "f6765:m1"} {"signature": "def __init__( self ):", "body": "self.reset()self.sections = {} self.section = None self.chapters = [] self.headers = {}", "docstring": "initialize a block content processor", "id": "f6769:c6:m0"} {"signature": "def make_html_words( self, words ):", "body": "line = \"\"if words:line = html_quote( words[] )for w in words[:]:line = line + \"\" + html_quote( w )return line", "docstring": "convert a series of simple words into some HTML text", "id": "f6770:c0:m3"} {"signature": "def make_html_word( self, word ):", "body": "m = re_crossref.match( word )if m:try:name = m.group( )rest = m.group( )block = self.identifiers[name]url = self.make_block_url( block )return '' + url + '' + name + '' + restexcept:sys.stderr.write(\"\" + name + \"\" )return '' + name + '' + restm = re_italic.match( word )if m:name = m.group( )rest = m.group( )return '' + name + '' + restm = re_bold.match( word )if m:name = m.group( )rest = m.group( )return '' + name + '' + restreturn html_quote( word )", "docstring": "analyze a simple word to detect cross-references and styling", "id": "f6770:c0:m4"} {"signature": "def file_exists( pathname ):", "body": "result = try:file = open( pathname, \"\" )file.close()except:result = Nonesys.stderr.write( pathname + \"\" )return result", "docstring": "checks that a given file exists", "id": "f6772:m5"} {"signature": "def main( argv ):", "body": "global output_dirtry:opts, args = getopt.getopt( sys.argv[:],\"\",[\"\", \"\", \"\", \"\"] )except getopt.GetoptError:usage()sys.exit( )if args == []:usage()sys.exit( )project_title = \"\"project_prefix = Noneoutput_dir = Nonefor opt in opts:if opt[] in ( \"\", \"\" ):usage()sys.exit( )if opt[] in ( \"\", \"\" ):project_title = opt[]if opt[] in ( \"\", \"\" ):utils.output_dir = opt[]if opt[] in ( \"\", \"\" ):project_prefix = 
opt[]check_output()source_processor = SourceProcessor()content_processor = ContentProcessor()file_list = make_file_list( args )for filename in file_list:source_processor.parse_file( filename )content_processor.parse_sources( source_processor )content_processor.finish()formatter = HtmlFormatter( content_processor, project_title, project_prefix )formatter.toc_dump()formatter.index_dump()formatter.section_dump_all()", "docstring": "main program loop", "id": "f6773:m1"} {"signature": "def has_axis(self, axis):", "body": "return (axis & self._supported_axes_mask) != ", "docstring": "Returns ``True`` if the controller has the requested axis, a value of enumeration :class:`ControllerAxes`.", "id": "f6775:c0:m2"} {"signature": "@propertydef action_left(self):", "body": "return self._buttons & ControllerButtons.action_left", "docstring": "``True`` if the left action button (\"X\" on Xbox 360) is pressed. Available only on game controllers with \n the ``standard`` or ``extended`` profiles.", "id": "f6775:c0:m18"} {"signature": "@propertydef right_trigger(self):", "body": "return self.get_axis(ControllerAxes.right_trigger)", "docstring": "The absolute right trigger value, between 0.0 and 1.0. Available only on game controllers with the\n ``extended`` profile.", "id": "f6775:c0:m12"} {"signature": "@propertydef controller_index(self):", "body": "return self._controller_index", "docstring": "The index of the controller, between 0 and 4 (read-only). Typically this is assigned in the order the\n controllers are detected, however some controllers may have an intrinsic \"player number\" that is exposed\n through this number. No two controllers will have the same controller index.", "id": "f6775:c0:m1"} {"signature": "def has_button(self, button):", "body": "return (button & self._supported_buttons_mask) != ", "docstring": "Returns ``True`` if the controller has the requested button, a value of enumeration :class:`ControllerButtons`.", "id": "f6775:c0:m4"} {"signature": "@propertydef left_thumb_x(self):", "body": "return self.get_axis(ControllerAxes.left_thumb_x)", "docstring": "The absolute X axis value of the left thumb-stick, or the main stick on a joystick.", "id": "f6775:c0:m7"} {"signature": "@propertydef action_up(self):", "body": "return self._buttons & ControllerButtons.action_up", "docstring": "``True`` if the up action button (\"Y\" on Xbox 360) is pressed. Available only on game controllers with \n the ``standard`` or ``extended`` profiles.", "id": "f6775:c0:m16"} {"signature": "@propertydef height(self):", "body": "return self._height", "docstring": "The height of the image, in texels (read-only).", "id": "f6776:c0:m4"} {"signature": "def set_transform(matrix):", "body": "lib.SetTransform((c_float * )(*matrix))", "docstring": "Replace the current graphics transform with the given 4x4 matrix. 
For example, to replace\n the transform with a translation by ``(x, y)``::\n\n bacon.set_transform([1, 0, 0, x,\n 0, 1, 0, y,\n 0, 0, 1, 0,\n 0, 0, 0, 1])\n\n :param matrix: a 4x4 matrix in column major order, represented as a flat 16 element sequence.", "id": "f6778:m0"} {"signature": "@propertydef right(self):", "body": "return self.button_mask & ( << )", "docstring": "``True`` if the right mouse button is currently pressed.", "id": "f6783:c0:m4"} {"signature": "def run(game):", "body": "if bacon._current_game:bacon._current_game = gamereturnglobal _tick_callback_handlebacon._current_game = gamewindow_resize_callback_handle = lib.WindowResizeEventHandler(window._window_resize_event_handler)lib.SetWindowResizeEventHandler(window_resize_callback_handle)key_callback_handle = lib.KeyEventHandler(keyboard._key_event_handler)lib.SetKeyEventHandler(key_callback_handle)mouse_button_callback_handle = lib.MouseButtonEventHandler(mouse_input._mouse_button_event_handler)lib.SetMouseButtonEventHandler(mouse_button_callback_handle)mouse_scroll_callback_handle = lib.MouseScrollEventHandler(mouse_input._mouse_scroll_event_handler)lib.SetMouseScrollEventHandler(mouse_scroll_callback_handle)controller_connected_handle = lib.ControllerConnectedEventHandler(controller._controller_connected_event_handler)lib.SetControllerConnectedEventHandler(controller_connected_handle)controller_button_handle = lib.ControllerButtonEventHandler(controller._controller_button_event_handler)lib.SetControllerButtonEventHandler(controller_button_handle)controller_axis_handle = lib.ControllerAxisEventHandler(controller._controller_axis_event_handler)lib.SetControllerAxisEventHandler(controller_axis_handle)_tick_callback_handle = lib.TickCallback(_first_tick_callback)lib.SetTickCallback(_tick_callback_handle)lib.Run()bacon._current_game = None_tick_callback_handle = Nonelib.SetWindowResizeEventHandler(lib.WindowResizeEventHandler())lib.SetKeyEventHandler(lib.KeyEventHandler())lib.SetMouseButtonEventHandler(lib.MouseButtonEventHandler())lib.SetMouseScrollEventHandler(lib.MouseScrollEventHandler())lib.SetControllerConnectedEventHandler(lib.ControllerConnectedEventHandler())lib.SetControllerButtonEventHandler(lib.ControllerButtonEventHandler())lib.SetControllerAxisEventHandler(lib.ControllerAxisEventHandler())lib.SetTickCallback(lib.TickCallback())", "docstring": "Start running the game. The window is created and shown at this point, and then\n the main event loop is entered. 'game.on_tick' and other event handlers are called\n repeatedly until the game exits.\n\n If a game is already running, this function replaces the :class:`Game` instance that\n receives events.", "id": "f6786:m3"} {"signature": "def on_controller_disconnected(self, controller):", "body": "pass", "docstring": "Called when a game controller is disconnected. You should use the `controller` parameter only\n to identify a previously used controller; its properties and values will no longer be available.\n\n :param controller: the :class:`Controller` that was disconnected", "id": "f6786:c0:m7"} {"signature": "def on_tick(self):", "body": "clear(, , , )", "docstring": "Called once per frame to update and render the game. 
You may only call\n drawing functions within the scope of this method.", "id": "f6786:c0:m1"} {"signature": "def on_controller_axis(self, controller, axis, value):", "body": "pass", "docstring": "Called when an axis on a game controller is moved.\n\n :param controller: the :class:`Controller` containing the axis\n :param button: axis index, of :class:`ControllerAxes` enumeration\n :param value: absolute position of the axis, between ``-1.0`` and ``1.0``", "id": "f6786:c0:m9"} {"signature": "def on_controller_button(self, controller, button, pressed):", "body": "pass", "docstring": "Called when a button on a game controller is pressed or released.\n\n :param controller: the :class:`Controller` containing the button\n :param button: button index, of :class:`ControllerButtons` enumeration\n :param pressed: ``True`` if the button was pressed, otherwise ``False``", "id": "f6786:c0:m8"} {"signature": "def draw_string(font, text, x, y, width=None, height=None, align=Alignment.left, vertical_align=VerticalAlignment.baseline):", "body": "style = Style(font)run = GlyphRun(style, text)glyph_layout = GlyphLayout([run], x, y, width, height, align, vertical_align)draw_glyph_layout(glyph_layout)", "docstring": "Draw a string with the given font.\n\n :note: Text alignment and word-wrapping is not yet implemented. The text is rendered with the left edge and\n baseline at ``(x, y)``.\n\n :param font: the :class:`Font` to render text with\n :param text: a string of text to render.", "id": "f6787:m0"} {"signature": "@propertydef fragment_source(self):", "body": "return self._fragment_source", "docstring": "Get the fragment shader source\n\n :type: ``str``", "id": "f6793:c0:m4"} {"signature": "@propertydef vertex_source(self):", "body": "return self._vertex_source", "docstring": "Get the vertex shader source\n\n :type: ``str``", "id": "f6793:c0:m3"} {"signature": "@propertydef metrics(self):", "body": "return self._metrics", "docstring": "Retrieves the pixel-space design metrics of the font.\n\n :return: :class:`FontMetrics`", "id": "f6794:c3:m1"} {"signature": "@propertydef ascent(self):", "body": "return self._ascent", "docstring": "Ascent of the font above the baseline, in pixels; typically negative", "id": "f6794:c0:m2"} {"signature": "def get_glyphs(self, str):", "body": "return [self.get_glyph(c) for c in str]", "docstring": "Retrieves a list of :class:`Glyph` for the given string.\n\n :param str: the string to render", "id": "f6794:c3:m5"} {"signature": "def arc_distance(theta_1, phi_1, theta_2, phi_2):", "body": "temp = (np.sin((theta_2-theta_1)/)**+ np.cos(theta_1)*np.cos(theta_2) * np.sin((phi_2-phi_1)/)**)distance_matrix = * np.arctan2(np.sqrt(temp), np.sqrt(-temp))return distance_matrix", "docstring": "Calculates the pairwise arc distance\nbetween all points in vector a and b.", "id": "f6876:m0"} {"signature": "def export_hdf5_v1(dataset, path, column_names=None, byteorder=\"\", shuffle=False, selection=False, progress=None, virtual=True):", "body": "if selection:if selection == True: selection = \"\"with h5py.File(path, \"\") as h5file_output:h5data_output = h5file_output.require_group(\"\")N = len(dataset) if not selection else dataset.selected_length(selection)if N == :raise ValueError(\"\")logger.debug(\"\", virtual)logger.debug(\"\" % (N, path))column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True)logger.debug(\"\" % column_names)for column_name in column_names:dtype = dataset.dtype(column_name)if column_name in dataset.get_column_names(strings=True):column = 
dataset.columns[column_name]shape = (N,) + column.shape[:]else:shape = (N,)if dtype.type == np.datetime64:array = h5file_output.require_dataset(\"\" % column_name, shape=shape, dtype=np.int64)array.attrs[\"\"] = dtype.nameelse:try:array = h5file_output.require_dataset(\"\" % column_name, shape=shape, dtype=dtype.newbyteorder(byteorder))except:logging.exception(\"\" % (column_name, dtype))array[] = array[] random_index_name = Nonecolumn_order = list(column_names) if shuffle:random_index_name = \"\"while random_index_name in dataset.get_column_names():random_index_name += \"\"shuffle_array = h5file_output.require_dataset(\"\" + random_index_name, shape=(N,), dtype=byteorder + \"\")shuffle_array[] = shuffle_array[]column_order.append(random_index_name) h5data_output.attrs[\"\"] = \"\".join(column_order) dataset_output = vaex.hdf5.dataset.Hdf5MemoryMapped(path, write=True)column_names = vaex.export._export(dataset_input=dataset, dataset_output=dataset_output, path=path, random_index_column=random_index_name,column_names=column_names, selection=selection, shuffle=shuffle, byteorder=byteorder,progress=progress)import getpassimport datetimeuser = getpass.getuser()date = str(datetime.datetime.now())source = dataset.pathdescription = \"\" % (user, date, source)if dataset.description:description += \"\" + dataset.descriptiondataset_output.copy_metadata(dataset)dataset_output.description = descriptionlogger.debug(\"\")dataset_output.write_meta()dataset_output.close_files()return", "docstring": ":param DatasetLocal dataset: dataset to export\n:param str path: path for file\n:param lis[str] column_names: list of column names to export or None for all columns\n:param str byteorder: = for native, < for little endian and > for big endian\n:param bool shuffle: export rows in random order\n:param bool selection: export selection or not\n:param progress: progress callback that gets a progress fraction as argument and should return True to continue,\n or a default progress bar when progress=True\n:param: bool virtual: When True, export virtual columns\n:return:", "id": "f6895:m0"} {"signature": "def write_meta(self):", "body": "with h5py.File(self.filename, \"\") as h5file_output:h5table_root = h5file_output[self.h5table_root_name]if self.description is not None:h5table_root.attrs[\"\"] = self.descriptionh5columns = h5table_root if self._version == else h5table_root['']for column_name in self.columns.keys():h5dataset = Noneif column_name in h5columns:h5dataset = h5columns[column_name]else:for group in h5columns.values():if '' in group.attrs:if group.attrs[''] in ['']: for name, column in group.items():if name == column_name:h5dataset = columnif h5dataset is None:raise ValueError(''.format(column_name))for name, values in [(\"\", self.ucds), (\"\", self.units), (\"\", self.descriptions)]:if column_name in values:value = ensure_string(values[column_name], cast=True)h5dataset.attrs[name] = valueelse:if name in h5columns.attrs:del h5dataset.attrs[name]", "docstring": "ucds, descriptions and units are written as attributes in the hdf5 file, instead of a seperate file as\n the default :func:`Dataset.write_meta`.", "id": "f6897:c0:m1"} {"signature": "def _hide_column(self, column):", "body": "column = _ensure_string_from_expression(column)new_name = self._find_valid_name('' + column)self._rename(column, new_name)", "docstring": "Hides a column by prefixing the name with \\'__\\", "id": "f6914:c0:m193"} {"signature": "def propagate_uncertainties(self, columns, depending_variables=None, 
cov_matrix='',covariance_format=\"\",uncertainty_format=\"\"):", "body": "names = _ensure_strings_from_expressions(columns)virtual_columns = self._expr(*columns, always_list=True)if depending_variables is None:depending_variables = set()for expression in virtual_columns:depending_variables |= expression.variables()depending_variables = list(sorted(list(depending_variables)))fs = [self[self.virtual_columns[name]] for name in names]jacobian = self._jacobian(fs, depending_variables)m = len(fs)n = len(depending_variables)cov_matrix = self._covariance_matrix_guess(depending_variables, full=cov_matrix == \"\", as_expression=True)cov_matrix_out = [[self[''] for __ in range(m)] for __ in range(m)]for i in range(m):for j in range(m):for k in range(n):for l in range(n):if jacobian[i][k].expression == '' or jacobian[j][l].expression == '' or cov_matrix[k][l].expression == '':passelse:cov_matrix_out[i][j] = cov_matrix_out[i][j] + jacobian[i][k] * cov_matrix[k][l] * jacobian[j][l]for i in range(m):for j in range(i + ):sigma = cov_matrix_out[i][j]sigma = self._expr(vaex.expresso.simplify(_ensure_string_from_expression(sigma)))if i != j:self.add_virtual_column(covariance_format.format(names[i], names[j]), sigma)else:self.add_virtual_column(uncertainty_format.format(names[i]), np.sqrt(sigma))", "docstring": "Propagates uncertainties (full covariance matrix) for a set of virtual columns.\n\n Covariance matrix of the depending variables is guessed by finding columns prefixed by \"e\"\n or `\"e_\"` or postfixed by \"_error\", \"_uncertainty\", \"e\" and `\"_e\"`.\n Off diagonals (covariance or correlation) by postfixes with \"_correlation\" or \"_corr\" for\n correlation or \"_covariance\" or \"_cov\" for covariances.\n (Note that x_y_cov = x_e * y_e * x_y_correlation.)\n\n\n Example\n\n >>> df = vaex.from_scalars(x=1, y=2, e_x=0.1, e_y=0.2)\n >>> df[\"u\"] = df.x + df.y\n >>> df[\"v\"] = np.log10(df.x)\n >>> df.propagate_uncertainties([df.u, df.v])\n >>> df.u_uncertainty, df.v_uncertainty\n\n :param columns: list of columns for which to calculate the covariance matrix.\n :param depending_variables: If not given, it is found out automatically, otherwise a list of columns which have uncertainties.\n :param cov_matrix: List of list with expressions giving the covariance matrix, in the same order as depending_variables. 
If 'full' or 'auto',\n the covariance matrix for the depending_variables will be guessed, where 'full' gives an error if an entry was not found.", "id": "f6914:c0:m114"} {"signature": "@vaex.utils.deprecated('')def _hstack(self, other, prefix=None):", "body": "assert len(self) == len(other), \"\"for name in other.get_column_names():if prefix:new_name = prefix + nameelse:new_name = nameself.add_column(new_name, other.columns[name])", "docstring": "Join the columns of the other DataFrame to this one, assuming the ordering is the same", "id": "f6914:c1:m12"} {"signature": "@docsubst@stat_1ddef minmax(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):", "body": "@delayeddef finish(*minmax_list):value = vaex.utils.unlistify(waslist, np.array(minmax_list))value = value.astype(dtype0)return value@delayeddef calculate(expression, limits):task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_MIN_MAX, selection=selection)self.executor.schedule(task)progressbar.add_task(task, \"\" % expression)return task@delayeddef finish(*minmax_list):value = vaex.utils.unlistify(waslist, np.array(minmax_list))value = value.astype(dtype0)return valueexpression = _ensure_strings_from_expressions(expression)binby = _ensure_strings_from_expressions(binby)waslist, [expressions, ] = vaex.utils.listify(expression)dtypes = [self.dtype(expr) for expr in expressions]dtype0 = dtypes[]if not all([k.kind == dtype0.kind for k in dtypes]):raise ValueError(\"\")progressbar = vaex.utils.progressbars(progress, name=\"\")limits = self.limits(binby, limits, selection=selection, delay=True)all_tasks = [calculate(expression, limits) for expression in expressions]result = finish(*all_tasks)return self._delay(delay, result)", "docstring": "Calculate the minimum and maximum for expressions, possibly on a grid defined by binby.\n\n\n Example:\n\n >>> df.minmax(\"x\")\n array([-128.293991, 271.365997])\n >>> df.minmax([\"x\", \"y\"])\n array([[-128.293991 , 271.365997 ],\n [ -71.5523682, 146.465836 ]])\n >>> df.minmax(\"x\", binby=\"x\", shape=5, limits=[-10, 10])\n array([[-9.99919128, -6.00010443],\n [-5.99972439, -2.00002384],\n [-1.99991322, 1.99998057],\n [ 2.0000093 , 5.99983597],\n [ 6.0004878 , 9.99984646]])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}, the last dimension is of shape (2)", "id": "f6914:c0:m36"} {"signature": "def dtype(self, expression, internal=False):", "body": "expression = _ensure_string_from_expression(expression)if expression in self.variables:return np.float64().dtypeelif expression in self.columns.keys():column = self.columns[expression]data = column[:]dtype = data.dtypeelse:data = self.evaluate(expression, , , filtered=False)dtype = data.dtypeif not internal:if dtype != str_type:if dtype.kind in '':return str_typeif dtype.kind == '':if isinstance(data[], six.string_types):return str_typereturn dtype", "docstring": "Return the numpy dtype for the given expression, if not a column, the first row will be evaluated to get the dtype.", "id": "f6914:c0:m57"} {"signature": "@docsubst@stat_1ddef sum(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False):", "body": "return self._compute_agg('', expression, binby, limits, shape, selection, delay, edges, progress)@delayeddef 
finish(*sums):return vaex.utils.unlistify(waslist, sums)expression = _ensure_strings_from_expressions(expression)binby = _ensure_strings_from_expressions(binby)waslist, [expressions, ] = vaex.utils.listify(expression)progressbar = vaex.utils.progressbars(progress)limits = self.limits(binby, limits, delay=True)sums = [self._sum_calculation(expression, binby=binby, limits=limits, shape=shape, selection=selection, progressbar=progressbar) for expression in expressions]s = finish(*sums)return self._delay(delay, s)", "docstring": "Calculate the sum for the given expression, possible on a grid defined by binby\n\n Example:\n\n >>> df.sum(\"L\")\n 304054882.49378014\n >>> df.sum(\"L\", binby=\"E\", shape=4)\n array([ 8.83517994e+06, 5.92217598e+07, 9.55218726e+07,\n 1.40008776e+08])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}", "id": "f6914:c0:m30"} {"signature": "def selected_length(self):", "body": "raise NotImplementedError", "docstring": "Returns the number of rows that are selected.", "id": "f6914:c0:m154"} {"signature": "def evaluate(self, expression, i1=None, i2=None, out=None, selection=None, filtered=True, internal=False):", "body": "expression = _ensure_string_from_expression(expression)selection = _ensure_strings_from_expressions(selection)i1 = i1 or i2 = i2 or (len(self) if (self.filtered and filtered) else self.length_unfiltered())mask = Noneif self.filtered and filtered: indices = self._filtered_range_to_unfiltered_indices(i1, i2)i1 = indices[]i2 = indices[-] + if selection is not None or (self.filtered and filtered):mask = self.evaluate_selection_mask(selection, i1, i2)scope = scopes._BlockScope(self, i1, i2, mask=mask, **self.variables)if out is not None:scope.buffers[expression] = outvalue = scope.evaluate(expression)if isinstance(value, ColumnString) and not internal:value = value.to_numpy()return value", "docstring": "The local implementation of :func:`DataFrame.evaluate`", "id": "f6914:c1:m17"} {"signature": "@docsubstdef to_dict(self, column_names=None, selection=None, strings=True, virtual=False):", "body": "return dict(self.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual))", "docstring": "Return a dict containing the ndarray corresponding to the evaluated data\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :return: dict", "id": "f6914:c0:m95"} {"signature": "@_hiddendef add_virtual_columns_cartesian_to_spherical(self, x=\"\", y=\"\", z=\"\", alpha=\"\", delta=\"\", distance=\"\", radians=False, center=None, center_name=\"\"):", "body": "transform = \"\" if radians else \"\"if center is not None:self.add_variable(center_name, center)if center is not None and center[] != :x = \"\".format(**locals())if center is not None and center[] != :y = \"\".format(**locals())if center is not None and center[] != :z = \"\".format(**locals())self.add_virtual_column(distance, \"\".format(**locals()))self.add_virtual_column(alpha, \"\".format(**locals()))self.add_virtual_column(delta, \"\".format(**locals()))", "docstring": "Convert cartesian to spherical 
coordinates.\n\n\n\n :param x:\n :param y:\n :param z:\n :param alpha:\n :param delta: name for polar angle, ranges from -90 to 90 (or -pi to pi when radians is True).\n :param distance:\n :param radians:\n :param center:\n :param center_name:\n :return:", "id": "f6914:c0:m122"} {"signature": "def __getitem__(self, item):", "body": "if isinstance(item, int):names = self.get_column_names()return [self.evaluate(name, item, item+)[] for name in names]elif isinstance(item, six.string_types):if hasattr(self, item) and isinstance(getattr(self, item), Expression):return getattr(self, item)return Expression(self, item) elif isinstance(item, Expression):expression = item.expressiondf = self.copy()df.select(expression, name=FILTER_SELECTION_NAME, mode='')return dfelif isinstance(item, (tuple, list)):df = self.copy(column_names=item)return dfelif isinstance(item, slice):df = self.extract()start, stop, step = item.start, item.stop, item.stepstart = start or stop = stop or len(df)assert step in [None, ]df.set_active_range(start, stop)return df.trim()", "docstring": "Convenient way to get expressions, (shallow) copies of a few columns, or to apply filtering.\n\n Example:\n\n >>> df['Lz'] # the expression 'Lz\n >>> df['Lz/2'] # the expression 'Lz/2'\n >>> df[[\"Lz\", \"E\"]] # a shallow copy with just two columns\n >>> df[df.Lz < 0] # a shallow copy with the filter Lz < 0 applied", "id": "f6914:c0:m190"} {"signature": "@docsubst@stat_1ddef var(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):", "body": "expression = _ensure_strings_from_expressions(expression)@delayeddef calculate(expression, limits):task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_ADD_WEIGHT_MOMENTS_012, selection=selection)progressbar.add_task(task, \"\" % expression)self.executor.schedule(task)return task@delayeddef finish(*stats_args):stats = np.array(stats_args)counts = stats[..., ]with np.errstate(divide=''):with np.errstate(divide='', invalid=''): mean = stats[..., ] / countsraw_moments2 = stats[..., ] / countsvariance = (raw_moments2 - mean**)return vaex.utils.unlistify(waslist, variance)binby = _ensure_strings_from_expressions(binby)waslist, [expressions, ] = vaex.utils.listify(expression)progressbar = vaex.utils.progressbars(progress)limits = self.limits(binby, limits, delay=True)stats = [calculate(expression, limits) for expression in expressions]var = finish(*stats)return self._delay(delay, var)", "docstring": "Calculate the sample variance for the given expression, possible on a grid defined by binby\n\n Example:\n\n >>> df.var(\"vz\")\n 12170.002429456246\n >>> df.var(\"vz\", binby=[\"(x**2+y**2)**0.5\"], shape=4)\n array([ 15271.90481083, 7284.94713504, 3738.52239232, 1449.63418988])\n >>> df.var(\"vz\", binby=[\"(x**2+y**2)**0.5\"], shape=4)**0.5\n array([ 123.57954851, 85.35190177, 61.14345748, 38.0740619 ])\n >>> df.std(\"vz\", binby=[\"(x**2+y**2)**0.5\"], shape=4)\n array([ 123.57954851, 85.35190177, 61.14345748, 38.0740619 ])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}", "id": "f6914:c0:m32"} {"signature": "@docsubst@stat_1ddef _agg(self, aggregator, grid, selection=False, delay=False, progress=None):", "body": "task_agg = self._get_task_agg(grid)sub_task = aggregator.add_operations(task_agg)return self._delay(delay, sub_task)", "docstring": 
":param selection: {selection}\n:param delay: {delay}\n:param progress: {progress}\n:return: {return_stat_scalar}", "id": "f6914:c1:m32"} {"signature": "@docsubstdef covar(self, x, y, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):", "body": "@delayeddef cov(mean_x, mean_y, mean_xy):return mean_xy - mean_x * mean_ywaslist, [xlist, ylist] = vaex.utils.listify(x, y)limits = self.limits(binby, limits, selection=selection, delay=True)@delayeddef calculate(limits):results = []for x, y in zip(xlist, ylist):mx = self.mean(x, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)my = self.mean(y, binby=binby, limits=limits, shape=shape, selection=selection, delay=True, progress=progressbar)cxy = self.mean(\"\" % (x, y), binby=binby, limits=limits, shape=shape, selection=selection,delay=True, progress=progressbar)results.append(cov(mx, my, cxy))return resultsprogressbar = vaex.utils.progressbars(progress)covars = calculate(limits)@delayeddef finish(covars):value = np.array(vaex.utils.unlistify(waslist, covars))return valuereturn self._delay(delay, finish(delayed_list(covars)))", "docstring": "Calculate the covariance cov[x,y] between and x and y, possibly on a grid defined by binby.\n\n Example:\n\n >>> df.covar(\"x**2+y**2+z**2\", \"-log(-E+1)\")\n array(52.69461456005138)\n >>> df.covar(\"x**2+y**2+z**2\", \"-log(-E+1)\")/(df.std(\"x**2+y**2+z**2\") * df.std(\"-log(-E+1)\"))\n 0.63666373822156686\n >>> df.covar(\"x**2+y**2+z**2\", \"-log(-E+1)\", binby=\"Lz\", shape=4)\n array([ 10.17387143, 51.94954078, 51.24902796, 20.2163929 ])\n\n\n\n :param x: {expression}\n :param y: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}", "id": "f6914:c0:m33"} {"signature": "def nop(self, expression, progress=False, delay=False):", "body": "expression = _ensure_string_from_expression(expression)def map(ar):passdef reduce(a, b):passreturn self.map_reduce(map, reduce, [expression], delay=delay, progress=progress, name='', to_numpy=False)", "docstring": "Evaluates expression, and drop the result, usefull for benchmarking, since vaex is usually lazy", "id": "f6914:c0:m13"} {"signature": "@_hiddendef add_column_healpix(self, name=\"\", longitude=\"\", latitude=\"\", degrees=True, healpix_order=, nest=True):", "body": "import healpy as hpif degrees:scale = \"\"else:scale = \"\"phi = self.evaluate(\"\" % (longitude, scale))theta = self.evaluate(\"\" % (latitude, scale))hp_index = hp.ang2pix(hp.order2nside(healpix_order), theta, phi, nest=nest)self.add_column(\"\", hp_index)", "docstring": "Add a healpix (in memory) column based on a longitude and latitude\n\n :param name: Name of column\n :param longitude: longitude expression\n :param latitude: latitude expression (astronomical convenction latitude=90 is north pole)\n :param degrees: If lon/lat are in degrees (default) or radians.\n :param healpix_order: healpix order, >= 0\n :param nest: Nested healpix (default) or ring.", "id": "f6914:c0:m109"} {"signature": "@docsubstdef to_items(self, column_names=None, selection=None, strings=True, virtual=False):", "body": "items = []for name in column_names or self.get_column_names(strings=strings, virtual=virtual):items.append((name, self.evaluate(name, selection=selection)))return items", "docstring": "Return a list of [(column_name, ndarray), ...)] pairs where the ndarray corresponds to the 
evaluated data\n\n :param column_names: list of column names, to export, when None DataFrame.get_column_names(strings=strings, virtual=virtual) is used\n :param selection: {selection}\n :param strings: argument passed to DataFrame.get_column_names when column_names is None\n :param virtual: argument passed to DataFrame.get_column_names when column_names is None\n :return: list of (name, ndarray) pairs", "id": "f6914:c0:m94"} {"signature": "def select_circle(self, x, y, xc, yc, r, mode=\"\", name=\"\", inclusive=True):", "body": "if inclusive:expr = (self[x] - xc)** + (self[y] - yc)** <= r**else:expr = (self[x] - xc)** + (self[y] - yc)** < r**self.select(boolean_expression=expr, mode=mode, name=name)", "docstring": "Select a circular region centred on xc, yc, with a radius of r.\n\nExample:\n\n>>> df.select_circle('x','y',2,3,1)\n\n:param x: expression for the x space\n:param y: expression for the y space\n:param xc: location of the centre of the circle in x\n:param yc: location of the centre of the circle in y\n:param r: the radius of the circle\n:param name: name of the selection\n:param mode:\n:return:", "id": "f6914:c0:m182"} {"signature": "def select_non_missing(self, drop_nan=True, drop_masked=True, column_names=None, mode=\"\", name=\"\"):", "body": "column_names = column_names or self.get_column_names(virtual=False)def create(current):return selections.SelectionDropNa(drop_nan, drop_masked, column_names, current, mode)self._selection(create, name)", "docstring": "Create a selection that selects rows having non missing values for all columns in column_names.\n\n The name reflect Panda's, no rows are really dropped, but a mask is kept to keep track of the selection\n\n :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)\n :param drop_masked: drop rows when there is a masked value in any of the columns\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :param str mode: Possible boolean operator: replace/and/or/xor/subtract\n :param str name: history tree or selection 'slot' to use\n :return:", "id": "f6914:c0:m177"} {"signature": "def __current_sequence_index(self):", "body": "return ", "docstring": "TODO", "id": "f6914:c0:m146"} {"signature": "def select_ellipse(self, x, y, xc, yc, width, height, angle=, mode=\"\", name=\"\", radians=False, inclusive=True):", "body": "if radians:passelse:alpha = np.deg2rad(angle)xr = width / yr = height / r = max(xr, yr)a = xr / rb = yr / rexpr = \"\".format(**locals())if inclusive:expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))** / a** + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))** / b** <= r**else:expr = ((self[x] - xc) * np.cos(alpha) + (self[y] - yc) * np.sin(alpha))** / a** + ((self[x] - xc) * np.sin(alpha) - (self[y] - yc) * np.cos(alpha))** / b** < r**self.select(boolean_expression=expr, mode=mode, name=name)", "docstring": "Select an elliptical region centred on xc, yc, with a certain width, height\nand angle.\n\nExample:\n\n>>> df.select_ellipse('x','y', 2, -1, 5,1, 30, name='my_ellipse')\n\n:param x: expression for the x space\n:param y: expression for the y space\n:param xc: location of the centre of the ellipse in x\n:param yc: location of the centre of the ellipse in y\n:param width: the width of the ellipse (diameter)\n:param height: the width of the ellipse (diameter)\n:param angle: (degrees) orientation of the ellipse, counter-clockwise\n measured from the y axis\n:param name: name of the 
selection\n:param mode:\n:return:", "id": "f6914:c0:m183"} {"signature": "def select(self, boolean_expression, mode=\"\", name=\"\"):", "body": "raise NotImplementedError", "docstring": "Select rows based on the boolean_expression, if there was a previous selection, the mode is taken into account.\n\n if boolean_expression is None, remove the selection, has_selection() will returns false\n\n Note that per DataFrame, only one selection is possible.\n\n :param str boolean_expression: boolean expression, such as 'x < 0', '(x < 0) || (y > -10)' or None to remove the selection\n :param str mode: boolean operation to perform with the previous selection, \"replace\", \"and\", \"or\", \"xor\", \"subtract\"\n :return: None", "id": "f6914:c0:m103"} {"signature": "def get_private_dir(self, create=False):", "body": "if self.is_local():name = os.path.abspath(self.path).replace(os.path.sep, \"\")[:] name = name.replace(\"\", \"\") else:server = self.servername = \"\" % (server.hostname, server.port, server.base_path.replace(\"\", \"\"), self.name)dir = os.path.join(vaex.utils.get_private_dir(), \"\", name)if create and not os.path.exists(dir):os.makedirs(dir)return dir", "docstring": "Each DataFrame has a directory where files are stored for metadata etc.\n\n Example\n\n >>> import vaex\n >>> ds = vaex.example()\n >>> vaex.get_private_dir()\n '/Users/users/breddels/.vaex/dfs/_Users_users_breddels_vaex-testing_data_helmi-dezeeuw-2000-10p.hdf5'\n\n :param bool create: is True, it will create the directory if it does not exist", "id": "f6914:c0:m68"} {"signature": "def export(self, path, column_names=None, byteorder=\"\", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True):", "body": "if path.endswith(''):self.export_arrow(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)elif path.endswith(''):self.export_hdf5(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)elif path.endswith(''):self.export_fits(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)if path.endswith(''):self.export_parquet(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)", "docstring": "Exports the DataFrame to a file written with arrow\n\n :param DataFrameLocal df: DataFrame to export\n :param str path: path for file\n :param lis[str] column_names: list of column names to export or None for all columns\n :param str byteorder: = for native, < for little endian and > for big endian (not supported for fits)\n :param bool shuffle: export rows in random order\n :param bool selection: export selection or not\n :param progress: progress callback that gets a progress fraction as argument and should return True to continue,\n or a default progress bar when progress=True\n :param: bool virtual: When True, export virtual columns\n :param str sort: expression used for sorting the output\n :param bool ascending: sort ascending (True) or descending\n :return:", "id": "f6914:c1:m21"} {"signature": "@propertydef nbytes(self):", "body": "return self.byte_size()", "docstring": "Alias for `df.byte_size()`, see :meth:`DataFrame.byte_size`.", "id": "f6914:c0:m56"} {"signature": "def has_selection(self, name=\"\"):", "body": "return self.get_selection(name) is not None", "docstring": "Returns True if there is a selection with the given name.", "id": "f6914:c0:m188"} 
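For illustration, a minimal usage sketch of the selection and export methods documented in the records above (``select_circle``, ``has_selection``, ``export``), assuming a vaex DataFrame ``df`` with numeric columns ``x`` and ``y``; the selection name and output path are illustrative only:

>>> import vaex
>>> df = vaex.example()
>>> df.select_circle('x', 'y', xc=0, yc=0, r=5, name='inner')  # circular selection centred on the origin
>>> df.has_selection('inner')
True
>>> df.count(selection='inner')  # number of rows falling inside the circle
>>> df.export('inner_region.hdf5', selection='inner')  # write only the selected rows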
{"signature": "def is_category(self, column):", "body": "column = _ensure_string_from_expression(column)return column in self._categories", "docstring": "Returns true if column is a category.", "id": "f6914:c0:m5"} {"signature": "@vaex.utils.deprecated('')@_hiddendef __call__(self, *expressions, **kwargs):", "body": "raise NotImplementedError", "docstring": "Alias/shortcut for :func:`DataFrame.subspace`", "id": "f6914:c0:m87"} {"signature": "def is_masked(self, column):", "body": "column = _ensure_string_from_expression(column)if column in self.columns:return np.ma.isMaskedArray(self.columns[column])return False", "docstring": "Return if a column is a masked (numpy.ma) column.", "id": "f6914:c0:m59"} {"signature": "def dropna(self, drop_nan=True, drop_masked=True, column_names=None):", "body": "copy = self.copy()copy.select_non_missing(drop_nan=drop_nan, drop_masked=drop_masked, column_names=column_names,name=FILTER_SELECTION_NAME, mode='')return copy", "docstring": "Create a shallow copy of a DataFrame, with filtering set using select_non_missing.\n\n :param drop_nan: drop rows when there is a NaN in any of the columns (will only affect float values)\n :param drop_masked: drop rows when there is a masked value in any of the columns\n :param column_names: The columns to consider, default: all (real, non-virtual) columns\n :rtype: DataFrame", "id": "f6914:c0:m178"} {"signature": "@_hiddendef write_meta(self):", "body": "path = os.path.join(self.get_private_dir(create=True), \"\")units = {key: str(value) for key, value in self.units.items()}meta_info = dict(description=self.description,ucds=self.ucds, units=units, descriptions=self.descriptions,)vaex.utils.write_json_or_yaml(path, meta_info)", "docstring": "Writes all meta data, ucd,description and units\n\n The default implementation is to write this to a file called meta.yaml in the directory defined by\n :func:`DataFrame.get_private_dir`. Other implementation may store this in the DataFrame file itself.\n (For instance the vaex hdf5 implementation does this)\n\n This method is called after virtual columns or variables are added. 
Upon opening a file, :func:`DataFrame.update_meta`\n is called, so that the information is not lost between sessions.\n\n Note: opening a DataFrame twice may result in corruption of this file.", "id": "f6914:c0:m76"} {"signature": "def select_rectangle(self, x, y, limits, mode=\"\", name=\"\"):", "body": "self.select_box([x, y], limits, mode=mode, name=name)", "docstring": "Select a 2d rectangular box in the space given by x and y, bounds by limits.\n\n Example:\n\n >>> df.select_box('x', 'y', [(0, 10), (0, 1)])\n\n :param x: expression for the x space\n :param y: expression fo the y space\n :param limits: sequence of shape [(x1, x2), (y1, y2)]\n :param mode:", "id": "f6914:c0:m180"} {"signature": "@_hiddendef __call__(self, *expressions, **kwargs):", "body": "import vaex.legacyreturn vaex.legacy.SubspaceLocal(self, expressions, kwargs.get(\"\") or self.executor, delay=kwargs.get(\"\", False))", "docstring": "The local implementation of :func:`DataFrame.__call__`", "id": "f6914:c1:m9"} {"signature": "def selection_redo(self, name=\"\", executor=None):", "body": "logger.debug(\"\")executor = executor or self.executorassert self.selection_can_redo(name=name)selection_history = self.selection_histories[name]index = self.selection_history_indices[name]next = selection_history[index + ]self.selection_history_indices[name] += self.signal_selection_changed.emit(self)logger.debug(\"\", selection_history, index)", "docstring": "Redo selection, for the name.", "id": "f6914:c0:m173"} {"signature": "def selection_can_undo(self, name=\"\"):", "body": "return self.selection_history_indices[name] > -", "docstring": "Can selection name be undone?", "id": "f6914:c0:m174"} {"signature": "@_hiddendef subspaces(self, expressions_list=None, dimensions=None, exclude=None, **kwargs):", "body": "if dimensions is not None:expressions_list = list(itertools.combinations(self.get_column_names(), dimensions))if exclude is not None:import sixdef excluded(expressions):if callable(exclude):return exclude(expressions)elif isinstance(exclude, six.string_types):return exclude in expressionselif isinstance(exclude, (list, tuple)):for e in exclude:if isinstance(e, six.string_types):if e in expressions:return Trueelif isinstance(e, (list, tuple)):if set(e).issubset(expressions):return Trueelse:raise ValueError(\"\")else:raise ValueError(\"\")return Falseexpressions_list = [expr for expr in expressions_list if not excluded(expr)]logger.debug(\"\", expressions_list)import vaex.legacyreturn vaex.legacy.Subspaces([self(*expressions, **kwargs) for expressions in expressions_list])", "docstring": "Generate a Subspaces object, based on a custom list of expressions or all possible combinations based on\n dimension\n\n :param expressions_list: list of list of expressions, where the inner list defines the subspace\n :param dimensions: if given, generates a subspace with all possible combinations for that dimension\n :param exclude: list of", "id": "f6914:c0:m85"} {"signature": "@docsubstdef limits_percentage(self, expression, percentage=, square=False, delay=False):", "body": "import scipylogger.info(\"\", expression, percentage)waslist, [expressions, ] = vaex.utils.listify(expression)limits = []for expr in expressions:subspace = self(expr)limits_minmax = subspace.minmax()vmin, vmax = limits_minmax[]size = * counts = subspace.histogram(size=size, limits=limits_minmax)cumcounts = np.concatenate([[], np.cumsum(counts)])cumcounts /= cumcounts.max()f = ( - percentage / ) / x = np.linspace(vmin, vmax, size + )l = scipy.interp([f, - f], 
cumcounts, x)limits.append(l)return vaex.utils.unlistify(waslist, limits)", "docstring": "Calculate the [min, max] range for expression, containing approximately a percentage of the data as defined\n by percentage.\n\n The range is symmetric around the median, i.e., for a percentage of 90, this gives the same results as:\n\n Example:\n\n >>> df.limits_percentage(\"x\", 90)\n array([-12.35081376, 12.14858052]\n >>> df.percentile_approx(\"x\", 5), df.percentile_approx(\"x\", 95)\n (array([-12.36813152]), array([ 12.13275818]))\n\n NOTE: this value is approximated by calculating the cumulative distribution on a grid.\n NOTE 2: The values above are not exactly the same, since percentile and limits_percentage do not share the same code\n\n :param expression: {expression_limits}\n :param float percentage: Value between 0 and 100\n :param delay: {delay}\n :return: {return_limits}", "id": "f6914:c0:m43"} {"signature": "def add_virtual_column(self, name, expression, unique=False):", "body": "type = \"\" if name in self.virtual_columns else \"\"expression = _ensure_string_from_expression(expression)if name in self.get_column_names(virtual=False):renamed = '' +vaex.utils.find_valid_name(name, used=self.get_column_names())expression = self._rename(name, renamed, expression)[].expressionname = vaex.utils.find_valid_name(name, used=[] if not unique else self.get_column_names())self.virtual_columns[name] = expressionself.column_names.append(name)self._save_assign_expression(name)self.signal_column_changed.emit(self, name, \"\")", "docstring": "Add a virtual column to the DataFrame.\n\n Example:\n\n >>> df.add_virtual_column(\"r\", \"sqrt(x**2 + y**2 + z**2)\")\n >>> df.select(\"r < 10\")\n\n :param: str name: name of virtual column\n :param: expression: expression for the column\n :param str unique: if name is already used, make it unique by adding a postfix, e.g. 
_1, or _2", "id": "f6914:c0:m126"} {"signature": "def add_variable(self, name, expression, overwrite=True, unique=True):", "body": "if unique or overwrite or name not in self.variables:existing_names = self.get_column_names(virtual=False) + list(self.variables.keys())name = vaex.utils.find_valid_name(name, used=[] if not unique else existing_names)self.variables[name] = expressionself.signal_variable_changed.emit(self, name, \"\")if unique:return name", "docstring": "Add a variable to to a DataFrame.\n\n A variable may refer to other variables, and virtual columns and expression may refer to variables.\n\n Example\n\n >>> df.add_variable('center', 0)\n >>> df.add_virtual_column('x_prime', 'x-center')\n >>> df.select('x_prime < 0')\n\n :param: str name: name of virtual varible\n :param: expression: expression for the variable", "id": "f6914:c0:m129"} {"signature": "def __len__(self):", "body": "if not self.filtered:return self._length_unfilteredelse:return int(self.count())", "docstring": "Returns the number of rows in the DataFrame (filtering applied).", "id": "f6914:c0:m153"} {"signature": "def concat(self, other):", "body": "dfs = []if isinstance(self, DataFrameConcatenated):dfs.extend(self.dfs)else:dfs.extend([self])if isinstance(other, DataFrameConcatenated):dfs.extend(other.dfs)else:dfs.extend([other])return DataFrameConcatenated(dfs)", "docstring": "Concatenates two DataFrames, adding the rows of one the other DataFrame to the current, returned in a new DataFrame.\n\n No copy of the data is made.\n\n :param other: The other DataFrame that is concatenated with this DataFrame\n :return: New DataFrame with the rows concatenated\n :rtype: DataFrameConcatenated", "id": "f6914:c1:m13"} {"signature": "def select(self, boolean_expression, mode=\"\", name=\"\", executor=None):", "body": "boolean_expression = _ensure_string_from_expression(boolean_expression)if boolean_expression is None and not self.has_selection(name=name):pass self.signal_selection_changed.emit(self) else:def create(current):return selections.SelectionExpression(boolean_expression, current, mode) if boolean_expression else Noneself._selection(create, name)", "docstring": "Perform a selection, defined by the boolean expression, and combined with the previous selection using the given mode.\n\n Selections are recorded in a history tree, per name, undo/redo can be done for them separately.\n\n :param str boolean_expression: Any valid column expression, with comparison operators\n :param str mode: Possible boolean operator: replace/and/or/xor/subtract\n :param str name: history tree or selection 'slot' to use\n :param executor:\n :return:", "id": "f6914:c0:m176"} {"signature": "def describe(self, strings=True, virtual=True, selection=None):", "body": "import pandas as pdN = len(self)columns = {}for feature in self.get_column_names(strings=strings, virtual=virtual)[:]:dtype = str(self.dtype(feature)) if self.dtype(feature) != str else ''if self.dtype(feature) == str_type or self.dtype(feature).kind in ['', '', '']:count = self.count(feature, selection=selection, delay=True)self.execute()count = count.get()columns[feature] = ((dtype, count, N-count, '', '', '', ''))else:count = self.count(feature, selection=selection, delay=True)mean = self.mean(feature, selection=selection, delay=True)std = self.std(feature, selection=selection, delay=True)minmax = self.minmax(feature, selection=selection, delay=True)self.execute()count, mean, std, minmax = count.get(), mean.get(), std.get(), minmax.get()count = int(count)columns[feature] = 
((dtype, count, N-count, mean, std, minmax[], minmax[]))return pd.DataFrame(data=columns, index=['', '', '', '', '', '', ''])", "docstring": "Give a description of the DataFrame.\n\n >>> import vaex\n >>> df = vaex.example()[['x', 'y', 'z']]\n >>> df.describe()\n x y z\n dtype float64 float64 float64\n count 330000 330000 330000\n missing 0 0 0\n mean -0.0671315 -0.0535899 0.0169582\n std 7.31746 7.78605 5.05521\n min -128.294 -71.5524 -44.3342\n max 271.366 146.466 50.7185\n >>> df.describe(selection=df.x > 0)\n x y z\n dtype float64 float64 float64\n count 164060 164060 164060\n missing 165940 165940 165940\n mean 5.13572 -0.486786 -0.0868073\n std 5.18701 7.61621 5.02831\n min 1.51635e-05 -71.5524 -44.3342\n max 271.366 78.0724 40.2191\n\n :param bool strings: Describe string columns or not\n :param bool virtual: Describe virtual columns or not\n :param selection: Optional selection to use.\n :return: Pandas dataframe", "id": "f6914:c0:m137"} {"signature": "def set_active_range(self, i1, i2):", "body": "logger.debug(\"\", (i1, i2))self._active_fraction = (i2 - i1) / float(self.length_original())self._index_start = i1self._index_end = i2self.select(None)self.set_current_row(None)self._length_unfiltered = i2 - i1self.signal_active_fraction_changed.emit(self, self._active_fraction)", "docstring": "Sets the active_fraction, set picked row to None, and remove selection.\n\n TODO: we may be able to keep the selection, if we keep the expression, and also the picked row", "id": "f6914:c0:m161"} {"signature": "@docsubst@stat_1ddef mean(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None, edges=False):", "body": "return self._compute_agg('', expression, binby, limits, shape, selection, delay, edges, progress)logger.debug(\"\", expression, binby, limits, shape, selection, delay)expression = _ensure_strings_from_expressions(expression)selection = _ensure_strings_from_expressions(selection)binby = _ensure_strings_from_expressions(binby)@delayeddef calculate(expression, limits):task = tasks.TaskStatistic(self, binby, shape, limits, weight=expression, op=tasks.OP_ADD_WEIGHT_MOMENTS_01, selection=selection)self.executor.schedule(task)progressbar.add_task(task, \"\" % expression)return task@delayeddef finish(*stats_args):stats = np.array(stats_args)counts = stats[..., ]with np.errstate(divide='', invalid=''):mean = stats[..., ] / countsreturn vaex.utils.unlistify(waslist, mean)waslist, [expressions, ] = vaex.utils.listify(expression)progressbar = vaex.utils.progressbars(progress)limits = self.limits(binby, limits, delay=True)stats = [calculate(expression, limits) for expression in expressions]var = finish(*stats)return self._delay(delay, var)", "docstring": "Calculate the mean for expression, possibly on a grid defined by binby.\n\n Example:\n\n >>> df.mean(\"x\")\n -0.067131491264005971\n >>> df.mean(\"(x**2+y**2)**0.5\", binby=\"E\", shape=4)\n array([ 2.43483742, 4.41840721, 8.26742458, 15.53846476])\n\n :param expression: {expression}\n :param binby: {binby}\n :param limits: {limits}\n :param shape: {shape}\n :param selection: {selection}\n :param delay: {delay}\n :param progress: {progress}\n :return: {return_stat_scalar}", "id": "f6914:c0:m28"} {"signature": "def ordinal_encode(self, column, values=None, inplace=False):", "body": "column = _ensure_string_from_expression(column)df = self if inplace else self.copy()df_unfiltered = df.copy()df_unfiltered.select_nothing(name=FILTER_SELECTION_NAME)df_unfiltered._length_unfiltered = 
df._length_originaldf_unfiltered.set_active_range(, df._length_original)found_values, codes = df_unfiltered.unique(column, return_inverse=True)if values is None:values = found_valueselse:translation = np.zeros(len(found_values), dtype=np.uint64)missing_value = len(found_values)for i, found_value in enumerate(found_values):try:found_value = found_value.decode('')except:passif found_value not in values: translation[i] = missing_valueelse:translation[i] = values.index(found_value)codes = translation[codes]if missing_value in translation:codes = np.ma.masked_array(codes, codes==missing_value)original_column = df.rename_column(column, '' + column, unique=True)labels = [str(k) for k in values]df.add_column(column, codes)df._categories[column] = dict(labels=labels, N=len(values), values=values)return df", "docstring": "Encode column as ordinal values and mark it as categorical.\n\n The existing column is renamed to a hidden column and replaced by a numerical columns\n with values between [0, len(values)-1].", "id": "f6914:c1:m3"} {"signature": "def state_load(self, f, use_active_range=False):", "body": "state = vaex.utils.read_json_or_yaml(f)self.state_set(state, use_active_range=use_active_range)", "docstring": "Load a state previously stored by :meth:`DataFrame.state_store`, see also :meth:`DataFrame.state_set`.", "id": "f6914:c0:m72"} {"signature": "@registerdef mean(expression):", "body": "return AggregatorDescriptorMean('', expression, '')", "docstring": "Creates a mean aggregation", "id": "f6916:m3"} {"signature": "@registerdef sum(expression):", "body": "return AggregatorDescriptorBasic('', expression, '')", "docstring": "Creates a sum aggregation", "id": "f6916:m2"} {"signature": "def iter_properties(fh, comments=False):", "body": "for line in _property_lines(fh):key, value = _split_key_value(line)if key is not COMMENT:key = _unescape(key)elif not comments:continueyield key, _unescape(value)", "docstring": "Incrementally read properties from a Java .properties file.\n\nYields tuples of key/value pairs.\n\nIf ``comments`` is `True`, comments will be included with ``jprops.COMMENT``\nin place of the key.\n\n:param fh: a readable file-like object\n:param comments: should include comments (default: False)", "id": "f6925:m5"} {"signature": "def _graphviz(self, dot=None):", "body": "from graphviz import Graph, Digraphnode = self._graph()dot = dot or Digraph(comment=self.expression)def walk(node):if isinstance(node, six.string_types):dot.node(node, node)return node, nodeelse:node_repr, fname, fobj, deps = nodenode_id = node_reprdot.node(node_id, node_repr)for dep in deps:dep_id, dep = walk(dep)dot.edge(node_id, dep_id)return node_id, nodewalk(node)return dot", "docstring": "Return a graphviz.Digraph object with a graph of the expression", "id": "f6960:c4:m10"} {"signature": "def __call__(self, *args, **kwargs):", "body": "return self.f(*args, **kwargs)", "docstring": "Forward the call to the real function", "id": "f6960:c6:m6"} {"signature": "def sum(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):", "body": "kwargs = dict(locals())del kwargs['']kwargs[''] = self.expressionreturn self.ds.sum(**kwargs)", "docstring": "Shortcut for ds.sum(expression, ...), see `Dataset.sum`", "id": "f6960:c4:m16"} {"signature": "@propertydef masked(self):", "body": "return self.ds.is_masked(self.expression)", "docstring": "Alias to df.is_masked(expression)", "id": "f6960:c4:m25"} {"signature": "def var(self, binby=[], limits=None, shape=default_shape, selection=False, 
delay=False, progress=None):", "body": "kwargs = dict(locals())del kwargs['']kwargs[''] = self.expressionreturn self.ds.var(**kwargs)", "docstring": "Shortcut for ds.std(expression, ...), see `Dataset.var`", "id": "f6960:c4:m19"} {"signature": "def mean(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):", "body": "kwargs = dict(locals())del kwargs['']kwargs[''] = self.expressionreturn self.ds.mean(**kwargs)", "docstring": "Shortcut for ds.mean(expression, ...), see `Dataset.mean`", "id": "f6960:c4:m17"} {"signature": "@propertydef str(self):", "body": "return StringOperations(self)", "docstring": "Gives access to string operations", "id": "f6960:c4:m2"} {"signature": "def std(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):", "body": "kwargs = dict(locals())del kwargs['']kwargs[''] = self.expressionreturn self.ds.std(**kwargs)", "docstring": "Shortcut for ds.std(expression, ...), see `Dataset.std`", "id": "f6960:c4:m18"} {"signature": "@register_function(scope='')def str_rindex(x, sub, start=, end=None):", "body": "return str_rfind(x, sub, start, end)", "docstring": "Returns the highest indices in each string in a column, where the provided substring is fully contained between within a\n sample. If the substring is not found, -1 is returned. Same as `str.rfind`.\n\n :param str sub: A substring to be found in the samples\n :param int start:\n :param int end:\n :returns: an expression containing the highest indices specifying the start of the substring.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.rindex(sub=\"et\")\n Expression = str_rindex(text, sub='et')\n Length: 5 dtype: int64 (expression)\n -----------------------------------\n 0 3\n 1 7\n 2 -1\n 3 -1\n 4 -1", "id": "f6962:m35"} {"signature": "@register_function(scope='', as_property=True)def dt_month_name(x):", "body": "import pandas as pdreturn pd.Series(_pandas_dt_fix(x)).dt.month_name().values.astype(str)", "docstring": "Returns the month names of a datetime sample in English.\n\n :returns: an expression containing the month names extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.month_name\n Expression = dt_month_name(date)\n Length: 3 dtype: str (expression)\n ---------------------------------\n 0 October\n 1 February\n 2 November", "id": "f6962:m8"} {"signature": "@register_function(scope='', as_property=True)def dt_weekofyear(x):", "body": "import pandas as pdreturn pd.Series(x).dt.weekofyear.values", "docstring": "Returns the week ordinal of the year.\n\n :returns: an expression containing the week ordinal of the year, extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.weekofyear\n Expression = dt_weekofyear(date)\n Length: 3 dtype: int64 
(expression)\n -----------------------------------\n 0 42\n 1 6\n 2 46", "id": "f6962:m11"} {"signature": "@register_function(scope='')def str_index(x, sub, start=, end=None):", "body": "return str_find(x, sub, start, end)", "docstring": "Returns the lowest indices in each string in a column, where the provided substring is fully contained between within a\n sample. If the substring is not found, -1 is returned. It is the same as `str.find`.\n\n :param str sub: A substring to be found in the samples\n :param int start:\n :param int end:\n :returns: an expression containing the lowest indices specifying the start of the substring.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.index(sub=\"et\")\n Expression = str_find(text, sub='et')\n Length: 5 dtype: int64 (expression)\n -----------------------------------\n 0 3\n 1 7\n 2 -1\n 3 -1\n 4 -1", "id": "f6962:m23"} {"signature": "@register_function(scope='')def str_repeat(x, repeats):", "body": "sl = _to_string_sequence(x).repeat(repeats)return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)", "docstring": "Duplicate each string in a column.\n\n :param int repeats: number of times each string sample is to be duplicated.\n :returns: an expression containing the duplicated strings\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.repeat(3)\n Expression = str_repeat(text, 3)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 SomethingSomethingSomething\n 1 very prettyvery prettyvery pretty\n 2 is comingis comingis coming\n 3 ourourour\n 4 way.way.way.", "id": "f6962:m32"} {"signature": "@register_function(scope='')def str_rstrip(x, to_strip=None):", "body": "sl = _to_string_sequence(x).rstrip('' if to_strip is None else to_strip) if to_strip != '' else xreturn column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)", "docstring": "Remove trailing characters from a string sample.\n\n :param str to_strip: The string to be removed\n :returns: an expression containing the modified string column.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.rstrip(to_strip='ing')\n Expression = str_rstrip(text, to_strip='ing')\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 Someth\n 1 very pretty\n 2 is com\n 3 our\n 4 way.", "id": "f6962:m37"} {"signature": "@register_function(scope='', as_property=True)def dt_is_leap_year(x):", "body": "import pandas as pdreturn pd.Series(x).dt.is_leap_year.values", "docstring": "Check whether a year is a leap year.\n\n :returns: an expression which evaluates to True if a year is a leap year, and to False otherwise.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> 
df.date.dt.is_leap_year\n Expression = dt_is_leap_year(date)\n Length: 3 dtype: bool (expression)\n ----------------------------------\n 0 False\n 1 True\n 2 False", "id": "f6962:m5"} {"signature": "@register_function(scope='')def str_title(x):", "body": "sl = _to_string_sequence(x).title()return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)", "docstring": "Converts all string samples to titlecase.\n\n :returns: an expression containing the converted strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.title()\n Expression = str_title(text)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 Something\n 1 Very Pretty\n 2 Is Coming\n 3 Our\n 4 Way.", "id": "f6962:m42"} {"signature": "@register_function(scope='')def str_center(x, width, fillchar=''):", "body": "sl = _to_string_sequence(x).pad(width, fillchar, True, True)return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)", "docstring": "Fills the left and right side of the strings with additional characters, such that the sample has a total of `width`\n characters.\n\n :param int width: The total number of characters of the resulting string sample.\n :param str fillchar: The character used for filling.\n :returns: an expression containing the filled strings.\n\n Example:\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.center(width=11, fillchar='!')\n Expression = str_center(text, width=11, fillchar='!')\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 !Something!\n 1 very pretty\n 2 !is coming!\n 3 !!!!our!!!!\n 4 !!!!way.!!!", "id": "f6962:m17"} {"signature": "@register_function(scope='')def str_rjust(x, width, fillchar=''):", "body": "sl = _to_string_sequence(x).pad(width, fillchar, True, False)return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)", "docstring": "Fills the left side of string samples with a specified character such that the strings are left-hand justified.\n\n :param int width: The minimal width of the strings.\n :param str fillchar: The character used for filling.\n :returns: an expression containing the filled strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.rjust(width=10, fillchar='!')\n Expression = str_rjust(text, width=10, fillchar='!')\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 !Something\n 1 very pretty\n 2 !is coming\n 3 !!!!!!!our\n 4 !!!!!!way.", "id": "f6962:m36"} {"signature": "@register_function(scope='', as_property=True)def dt_hour(x):", "body": "import pandas as pdreturn pd.Series(x).dt.hour.values", "docstring": "Extracts the hour out of a datetime samples.\n\n :returns: an expression containing the hour extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = 
vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.hour\n Expression = dt_hour(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 3\n 1 10\n 2 11", "id": "f6962:m12"} {"signature": "@register_function(scope='')def str_contains(x, pattern, regex=True):", "body": "return _to_string_sequence(x).search(pattern, regex)", "docstring": "Check if a string pattern or regex is contained within a sample of a string column.\n\n :param str pattern: A string or regex pattern\n :param bool regex: If True,\n :returns: an expression which is evaluated to True if the pattern is found in a given sample, and it is False otherwise.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.contains('very')\n Expression = str_contains(text, 'very')\n Length: 5 dtype: bool (expression)\n ----------------------------------\n 0 False\n 1 True\n 2 False\n 3 False\n 4 False", "id": "f6962:m18"} {"signature": "@register_function(scope='')def str_capitalize(x):", "body": "sl = _to_string_sequence(x).capitalize()return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)", "docstring": "Capitalize the first letter of a string sample.\n\n :returns: an expression containing the capitalized strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.capitalize()\n Expression = str_capitalize(text)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 Something\n 1 Very pretty\n 2 Is coming\n 3 Our\n 4 Way.", "id": "f6962:m15"} {"signature": "@register_function(scope='', as_property=True)def dt_dayofweek(x):", "body": "import pandas as pdreturn pd.Series(x).dt.dayofweek.values", "docstring": "Obtain the day of the week with Monday=0 and Sunday=6\n\n :returns: an expression containing the day of week.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.dayofweek\n Expression = dt_dayofweek(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 0\n 1 3\n 2 3", "id": "f6962:m3"} {"signature": "@register_function(scope='', as_property=True)def dt_day(x):", "body": "import pandas as pdreturn pd.Series(x).dt.day.values", "docstring": "Extracts the day from a datetime sample.\n\n :returns: an expression containing the day extracted from a datetime column.\n\n Example:\n\n >>> import vaex\n >>> import numpy as np\n >>> date = np.array(['2009-10-12T03:31:00', '2016-02-11T10:17:34', '2015-11-12T11:34:22'], dtype=np.datetime64)\n >>> df = vaex.from_arrays(date=date)\n >>> df\n # date\n 0 2009-10-12 03:31:00\n 1 2016-02-11 10:17:34\n 2 2015-11-12 11:34:22\n\n >>> df.date.dt.day\n Expression = dt_day(date)\n Length: 3 dtype: int64 (expression)\n -----------------------------------\n 0 12\n 1 11\n 2 12", "id": "f6962:m9"} {"signature": "@register_function(scope='')def str_lower(x):", "body": "sl = 
_to_string_sequence(x).lower()return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)", "docstring": "Converts string samples to lower case.\n\n :returns: an expression containing the converted strings.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.lower()\n Expression = str_lower(text)\n Length: 5 dtype: str (expression)\n ---------------------------------\n 0 something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.", "id": "f6962:m28"} {"signature": "@register_function(scope='')def str_rfind(x, sub, start=, end=None):", "body": "return _to_string_sequence(x).find(sub, start, if end is None else end, end is None, False)", "docstring": "Returns the highest indices in each string in a column, where the provided substring is fully contained between within a\n sample. If the substring is not found, -1 is returned.\n\n :param str sub: A substring to be found in the samples\n :param int start:\n :param int end:\n :returns: an expression containing the highest indices specifying the start of the substring.\n\n Example:\n\n >>> import vaex\n >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']\n >>> df = vaex.from_arrays(text=text)\n >>> df\n # text\n 0 Something\n 1 very pretty\n 2 is coming\n 3 our\n 4 way.\n\n >>> df.text.str.rfind(sub=\"et\")\n Expression = str_rfind(text, sub='et')\n Length: 5 dtype: int64 (expression)\n -----------------------------------\n 0 3\n 1 7\n 2 -1\n 3 -1\n 4 -1", "id": "f6962:m34"} {"signature": "def server(url, **kwargs):", "body": "from vaex.remote import ServerResturl = urlparse(url)if url.scheme == \"\":websocket = Trueelse:websocket = Falseassert url.scheme in [\"\", \"\"]port = url.portbase_path = url.pathhostname = url.hostnamereturn vaex.remote.ServerRest(hostname, base_path=base_path, port=port, websocket=websocket, **kwargs)", "docstring": "Connect to hostname supporting the vaex web api.\n\n :param str hostname: hostname or ip address of server\n :return vaex.dataframe.ServerRest: returns a server object, note that it does not connect to the server yet, so this will always succeed\n :rtype: ServerRest", "id": "f6963:m16"} {"signature": "def read_csv_and_convert(path, shuffle=False, copy_index=True, **kwargs):", "body": "from concurrent.futures import ProcessPoolExecutorimport pandas as pdfilenames = glob.glob(path)if len(filenames) > :filename_hdf5 = _convert_name(filenames, shuffle=shuffle)filename_hdf5_noshuffle = _convert_name(filenames, shuffle=False)if not os.path.exists(filename_hdf5):if not os.path.exists(filename_hdf5_noshuffle):for filename in filenames:read_csv_and_convert(filename, shuffle=shuffle, copy_index=copy_index, **kwargs)ds = open_many([_convert_name(k, shuffle=shuffle) for k in filenames])else:ds = open(filename_hdf5_noshuffle)ds.export_hdf5(filename_hdf5, shuffle=shuffle)return open(filename_hdf5)else:filename = filenames[]filename_hdf5 = _convert_name(filename, shuffle=shuffle)filename_hdf5_noshuffle = _convert_name(filename, shuffle=False)if not os.path.exists(filename_hdf5):if not os.path.exists(filename_hdf5_noshuffle):df = pd.read_csv(filename, **kwargs)ds = from_pandas(df, copy_index=copy_index)else:ds = open(filename_hdf5_noshuffle)ds.export_hdf5(filename_hdf5, shuffle=shuffle)return open(filename_hdf5)", "docstring": "Convert a path (or glob pattern) to a single hdf5 file, will 
open the hdf5 file if it exists.\n\n Example:\n >>> vaex.read_csv_and_convert('test-*.csv', shuffle=True) # this may take a while\n >>> vaex.read_csv_and_convert('test-*.csv', shuffle=True) # 2nd time it is instant\n\n :param str path: path of file or glob pattern for multiple files\n :param bool shuffle: shuffle DataFrame when converting to hdf5\n :param bool copy_index: by default pandas will create an index (row number), set to false if you want to drop that\n :param kwargs: parameters passed to pandas' read_csv", "id": "f6963:m15"} {"signature": "def concat(dfs):", "body": "ds = reduce((lambda x, y: x.concat(y)), dfs)return ds", "docstring": "Concatenate a list of DataFrames.\n\n :rtype: DataFrame", "id": "f6963:m24"} {"signature": "def from_dict(data):", "body": "return vaex.from_arrays(**data)", "docstring": "Create an in memory dataset from a dict with column names as keys and list/numpy-arrays as values\n\n Example\n\n >>> data = {'A':[1,2,3],'B':['a','b','c']}\n >>> vaex.from_dict(data)\n # A B\n 0 1 'a'\n 1 2 'b'\n 2 3 'c'\n\n :param data: A dict of {columns:[value, value,...]}\n :rtype: DataFrame", "id": "f6963:m6"} {"signature": "def from_astropy_table(table):", "body": "import vaex.file.otherreturn vaex.file.other.DatasetAstropyTable(table=table)", "docstring": "Create a vaex DataFrame from an Astropy Table.", "id": "f6963:m5"} {"signature": "def from_samp(username=None, password=None):", "body": "print(\"\")import vaex.sampt = vaex.samp.single_table(username=username, password=password)return from_astropy_table(t.to_table())", "docstring": "Connect to a SAMP Hub and wait for a single table load event, disconnect, download the table and return the DataFrame.\n\n Useful if you want to send a single table from say TOPCAT to vaex in a python console or notebook.", "id": "f6963:m4"} {"signature": "def open(path, convert=False, shuffle=False, copy_index=True, *args, **kwargs):", "body": "import vaextry:if path in aliases:path = aliases[path]if path.startswith(\"\") or path.startswith(\"\"): server, DataFrame = path.rsplit(\"\", )server = vaex.server(server, **kwargs)DataFrames = server.DataFrames(as_dict=True)if DataFrame not in DataFrames:raise KeyError(\"\" % (DataFrame, \"\".join(DataFrames.keys())))return DataFrames[DataFrame]if path.startswith(\"\"):import vaex.distributedreturn vaex.distributed.open(path, *args, **kwargs)else:import vaex.fileimport globfilenames = list(sorted(glob.glob(path)))ds = Noneif len(filenames) == :raise IOError(''.format(path))filename_hdf5 = _convert_name(filenames, shuffle=shuffle)filename_hdf5_noshuffle = _convert_name(filenames, shuffle=False)if len(filenames) == :path = filenames[]ext = os.path.splitext(path)[]if os.path.exists(filename_hdf5) and convert: if convert:ds = vaex.file.open(filename_hdf5)else:ds = vaex.file.open(filename_hdf5, *args, **kwargs)else:if ext == '': ds = from_csv(path, copy_index=copy_index, **kwargs)else:ds = vaex.file.open(path, *args, **kwargs)if convert:ds.export_hdf5(filename_hdf5, shuffle=shuffle)ds = vaex.file.open(filename_hdf5) if ds is None:if os.path.exists(path):raise IOError(''.format(path))if os.path.exists(path):raise IOError(''.format(path))elif len(filenames) > :if convert not in [True, False]:filename_hdf5 = convertelse:filename_hdf5 = _convert_name(filenames, shuffle=shuffle)if os.path.exists(filename_hdf5) and convert: ds = open(filename_hdf5)else:DataFrames = []for filename in filenames:DataFrames.append(open(filename, convert=bool(convert), shuffle=shuffle, **kwargs))ds = 
vaex.dataframe.DataFrameConcatenated(DataFrames)if convert:ds.export_hdf5(filename_hdf5, shuffle=shuffle)ds = vaex.file.open(filename_hdf5, *args, **kwargs)if ds is None:raise IOError(''.format(path))return dsexcept:logging.getLogger(\"\").error(\"\" % path)raise", "docstring": "Open a DataFrame from file given by path.\n\n Example:\n\n >>> ds = vaex.open('sometable.hdf5')\n >>> ds = vaex.open('somedata*.csv', convert='bigdata.hdf5')\n\n :param str path: local or absolute path to file, or glob string\n :param convert: convert files to an hdf5 file for optimization, can also be a path\n :param bool shuffle: shuffle converted DataFrame or not\n :param args: extra arguments for file readers that need it\n :param kwargs: extra keyword arguments\n :param bool copy_index: copy index when source is read via pandas\n :return: return a DataFrame on success, otherwise None\n :rtype: DataFrame", "id": "f6963:m2"} {"signature": "def set_log_level_info():", "body": "import logginglogging.getLogger(\"\").setLevel(logging.INFO)", "docstring": "set log level to info", "id": "f6963:m20"} {"signature": "def open_many(filenames):", "body": "dfs = []for filename in filenames:filename = filename.strip()if filename and filename[] != \"\":dfs.append(open(filename))return vaex.dataframe.DataFrameConcatenated(dfs=dfs)", "docstring": "Open a list of filenames, and return a DataFrame with all DataFrames concatenated.\n\n :param list[str] filenames: list of filenames/paths\n :rtype: DataFrame", "id": "f6963:m3"} {"signature": "def subdivide(length, parts=None, max_length=None):", "body": "if max_length:i1 = done = Falsewhile not done:i2 = min(length, i1 + max_length)yield i1, i2i1 = i2if i1 == length:done = Trueelse:part_length = int(math.ceil(float(length) / parts))for index in range(parts):i1, i2 = index * part_length, min(length, (index + ) * part_length)yield i1, i2", "docstring": "Generates a list with start and stop indices of length parts, [(0, length/parts), ..., (.., length)]", "id": "f6969:m1"} {"signature": "def delayed(f):", "body": "def wrapped(*args, **kwargs):key_promise = list([(key, promisify(value)) for key, value in kwargs.items()])arg_promises = list([promisify(value) for value in args])kwarg_promises = list([promise for key, promise in key_promise])promises = arg_promises + kwarg_promisesfor promise in promises:def echo_error(exc, promise=promise):print(\"\", promise, \"\", exc)def echo(value, promise=promise):print(\"\", repr(promise), \"\", value)allarguments = aplus.listPromise(*promises)def call(_):kwargs_real = {key: promise.get() for key, promise in key_promise}args_real = list([promise.get() for promise in arg_promises])return f(*args_real, **kwargs_real)def error(exc):print(\"\", exc)raise excreturn allarguments.then(call, error)return wrapped", "docstring": "Decorator to transparently accept delayed computation.\n\n Example:\n\n >>> delayed_sum = ds.sum(ds.E, binby=ds.x, limits=limits,\n >>> shape=4, delay=True)\n >>> @vaex.delayed\n >>> def total_sum(sums):\n >>> return sums.sum()\n >>> sum_of_sums = total_sum(delayed_sum)\n >>> ds.execute()\n >>> sum_of_sums.get()\n See the tutorial for a more complete example https://docs.vaex.io/en/latest/tutorial.html#Parallel-computations", "id": "f6972:m1"} {"signature": "def plot(self, grid=None, size=, limits=None, square=False, center=None, weight=None, weight_stat=\"\", figsize=None,aspect=\"\", f=\"\", axes=None, xlabel=None, ylabel=None,group_by=None, group_limits=None, group_colors='', group_labels=None, group_count=None,vmin=None, 
vmax=None,cmap=\"\",**kwargs):", "body": "import pylabf = _parse_f(f)limits = self.limits(limits)if limits is None:limits = self.limits_sigma()if group_limits is None and group_by:group_limits = tuple(self.df(group_by).minmax()[]) + (group_count,)if figsize is not None:pylab.figure(num=None, figsize=figsize, dpi=, facecolor='', edgecolor='')if axes is None:axes = pylab.gca()fig = pylab.gcf()pylab.xlabel(xlabel or self.expressions[])pylab.ylabel(ylabel or self.expressions[])rgba8 = self.image_rgba(grid=grid, size=size, limits=limits, square=square, center=center, weight=weight, weight_stat=weight_stat,f=f, axes=axes,group_by=group_by, group_limits=group_limits, group_colors=group_colors, group_count=group_count,vmin=vmin, vmax=vmax,cmap=cmap)import matplotlibif group_by:if isinstance(group_colors, six.string_types):group_colors = matplotlib.cm.get_cmap(group_colors)if isinstance(group_colors, matplotlib.colors.Colormap):group_count = group_limits[]colors = [group_colors(k / float(group_count - )) for k in range(group_count)]else:colors = [matplotlib.colors.colorConverter.to_rgba(k) for k in group_colors]colormap = matplotlib.colors.ListedColormap(colors)gmin, gmax, group_count = group_limits delta = (gmax - gmin) / (group_count - )norm = matplotlib.colors.Normalize(gmin - delta / , gmax + delta / )sm = matplotlib.cm.ScalarMappable(norm, colormap)sm.set_array() colorbar = fig.colorbar(sm)if group_labels:colorbar.set_ticks(np.arange(gmin, gmax + delta / , delta))colorbar.set_ticklabels(group_labels)else:colorbar.set_ticks(np.arange(gmin, gmax + delta / , delta))colorbar.set_ticklabels(map(lambda x: \"\" % x, np.arange(gmin, gmax + delta / , delta)))colorbar.ax.set_ylabel(group_by)im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin=\"\", aspect=aspect, **kwargs)else:norm = matplotlib.colors.Normalize(, )sm = matplotlib.cm.ScalarMappable(norm, cmap)sm.set_array() colorbar = fig.colorbar(sm)im = axes.imshow(rgba8, extent=np.array(limits).flatten(), origin=\"\", aspect=aspect, **kwargs)colorbar = Nonereturn im, colorbar", "docstring": "Plot the subspace using sane defaults to get a quick look at the data.\n\n :param grid: A 2d numpy array with the counts, if None it will be calculated using limits provided and Subspace.histogram\n :param size: Passed to Subspace.histogram\n :param limits: Limits for the subspace in the form [[xmin, xmax], [ymin, ymax]], if None it will be calculated using Subspace.limits_sigma\n :param square: argument passed to Subspace.limits_sigma\n :param Executor executor: responsible for executing the tasks\n :param figsize: (x, y) tuple passed to pylab.figure for setting the figure size\n :param aspect: Passed to matplotlib's axes.set_aspect\n :param xlabel: String for label on x axis (may contain latex)\n :param ylabel: Same for y axis\n :param kwargs: extra argument passed to axes.imshow, useful for setting the colormap for instance, e.g. cmap='afmhot'\n :return: matplotlib.image.AxesImage", "id": "f6974:c5:m15"} {"signature": "def sum(self):", "body": "raise NotImplementedError", "docstring": "Return a sequence of [sum, ... 
, sum] corresponding to the sum of values of each expression in this subspace ignoring NaN.", "id": "f6974:c5:m26"} {"signature": "def bounded_by_minmax(self):", "body": "bounds = self.minmax()return SubspaceBounded(self, bounds)", "docstring": "Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.minmax()\n\n :rtype: SubspaceBounded", "id": "f6974:c5:m21"} {"signature": "def bounded_by_sigmas(self, sigmas=, square=False):", "body": "bounds = self.limits_sigma(sigmas=sigmas, square=square)return SubspaceBounded(self, bounds)", "docstring": "Returns a bounded subspace (SubspaceBounded) with limits given by Subspace.limits_sigma()\n\n :rtype: SubspaceBounded", "id": "f6974:c5:m22"} {"signature": "def limits(self, value, square=False):", "body": "if isinstance(value, six.string_types):import rematch = re.match(r\"\", value)if match is None:raise ValueError(\"\")else:value, type = match.groups()import astvalue = ast.literal_eval(value)type = type.strip()if type in [\"\", \"\"]:return self.limits_sigma(value)elif type in [\"\", \"\"]:return self.limits_sigma(value, square=True)elif type in [\"\", \"\"]:return self.limits_percentage(value)elif type in [\"\", \"\", \"\"]:return self.limits_percentage(value, square=True)if value is None:return self.limits_percentage(square=square)else:return value", "docstring": "TODO: doc + server side implementation", "id": "f6974:c5:m12"} {"signature": "def count(expression=''):", "body": "return _Statistic('', expression)", "docstring": "Creates a count statistic", "id": "f6977:m0"} {"signature": "@patchdef add_virtual_columns_cartesian_velocities_to_pmvr(self, x=\"\", y=\"\", z=\"\", vx=\"\", vy=\"\", vz=\"\", vr=\"\", pm_long=\"\", pm_lat=\"\", distance=None):", "body": "if distance is None:distance = \"\".format(**locals())k = self.add_variable(\"\", k, overwrite=False)self.add_virtual_column(vr, \"\".format(**locals()))self.add_virtual_column(pm_long, \"\".format(**locals()))self.add_virtual_column(pm_lat, \"\".format(**locals()))", "docstring": "Convert velocities from a cartesian system to proper motions and radial velocities\n\n TODO: errors\n\n :param x: name of x column (input)\n :param y: y\n :param z: z\n :param vx: vx\n :param vy: vy\n :param vz: vz\n :param vr: name of the column for the radial velocity in the r direction (output)\n :param pm_long: name of the column for the proper motion component in the longitude direction (output)\n :param pm_lat: name of the column for the proper motion component in the latitude direction, positive points to the north pole (output)\n :param distance: Expression for distance, if not given defaults to sqrt(x**2+y**2+z**2), but if this column already exists, passing this expression may lead to a better performance\n :return:", "id": "f6985:m5"} {"signature": "def patch(f):", "body": "name = f.__name__Dataset.__hidden__[name] = freturn f", "docstring": "Adds method f to the Dataset class", "id": "f6985:m0"} {"signature": "@patchdef add_virtual_columns_lbrvr_proper_motion2vcartesian(self, long_in=\"\", lat_in=\"\", distance=\"\", pm_long=\"\", pm_lat=\"\",vr=\"\", vx=\"\", vy=\"\", vz=\"\",center_v=(, , ),propagate_uncertainties=False, radians=False):", "body": "k = a, d, distance = self._expr(long_in, lat_in, distance)pm_long, pm_lat, vr = self._expr(pm_long, pm_lat, vr)if not radians:a = a * np.pi/d = d * np.pi/A = [[np.cos(a)*np.cos(d), -np.sin(a), -np.cos(a)*np.sin(d)],[np.sin(a)*np.cos(d), np.cos(a), -np.sin(a)*np.sin(d)],[np.sin(d), d*, np.cos(d)]]self.add_virtual_columns_matrix3d(vr, k 
* pm_long * distance, k * pm_lat * distance, vx, vy, vz, A, translation=center_v)if propagate_uncertainties:self.propagate_uncertainties([self[vx], self[vy], self[vz]])", "docstring": "Convert radial velocity and galactic proper motions (and positions) to cartesian velocities wrt the center_v\n\n Based on http://adsabs.harvard.edu/abs/1987AJ.....93..864J\n\n\n :param long_in: Name/expression for galactic longitude\n :param lat_in: Name/expression for galactic latitude\n :param distance: Name/expression for heliocentric distance\n :param pm_long: Name/expression for the galactic proper motion in latitude direction (pm_l*, so cosine(b) term should be included)\n :param pm_lat: Name/expression for the galactic proper motion in longitude direction\n :param vr: Name/expression for the radial velocity\n :param vx: Output name for the cartesian velocity x-component\n :param vy: Output name for the cartesian velocity y-component\n :param vz: Output name for the cartesian velocity z-component\n :param center_v: Extra motion that should be added, for instance lsr + motion of the sun wrt the galactic restframe\n :param radians: input and output in radians (True), or degrees (False)\n :return:", "id": "f6985:m8"} {"signature": "def get_transform(self):", "body": "return self.get_patch_transform()", "docstring": "Return the :class:`~matplotlib.transforms.Transform` applied\nto the :class:`Patch`.", "id": "f7001:c0:m4"} {"signature": "def __init__(self, dataset, parent=None, *args):", "body": "QtCore.QAbstractTableModel.__init__(self, parent, *args)self.dataset = datasetself.row_count_start = self.table_column_names = [\"\", \"\", \"\"]", "docstring": ":type dataset: Dataset", "id": "f7011:c0:m0"} {"signature": "def __init__(self, parent, dataset, variables=False):", "body": "QtGui.QComboBox.__init__(self, parent)self.identifiers = []self.columns = dataset.get_column_names(virtual=True)self.identifiers.extend(vaex.dataset.expression_namespace.keys())self.identifiers.extend(self.columns)if variables:self.identifiers.extend(list(dataset.variables.keys()))self.addItems([\"\"] + self.columns)self.setEditable(True)lineEdit = self.lineEdit()self.completer = IdentifierCompleter(lineEdit, self.identifiers)lineEdit.setCompleter(self.completer)", "docstring": ":param parent:\n:param Dataset dataset:", "id": "f7018:c7:m0"} {"signature": "@propertydef vx(self):", "body": "return self.state.vector_expressions[]", "docstring": "vector x expression", "id": "f7020:c2:m12"} {"signature": "@propertydef zlim(self):", "body": "return self.get_range()", "docstring": "vector z expression", "id": "f7020:c2:m28"} {"signature": "@propertydef statistic(self):", "body": "return self.state.statistic", "docstring": "vector z expression", "id": "f7020:c2:m18"} {"signature": "def disconnect(self):", "body": "self.canvas.mpl_disconnect(self._cidmotion)self.canvas.mpl_disconnect(self._ciddraw)", "docstring": "disconnect events", "id": "f7025:c0:m2"} {"signature": "def _wait(self):", "body": "logger.debug(\"\")self._plot_event = threading.Event()self.queue_update._wait()self.queue_replot._wait()self.queue_redraw._wait()qt_app = QtCore.QCoreApplication.instance()sleep = while not self._plot_event.is_set():logger.debug(\"\")qt_app.processEvents()QtTest.QTest.qSleep(sleep)logger.debug(\"\")", "docstring": "Used for unittesting to make sure the plots are all done", "id": "f7025:c1:m20"} {"signature": "def arrow_table_from_vaex_df(ds, column_names=None, selection=None, strings=True, virtual=False):", "body": "names = []arrays = []for name, 
array in ds.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual):names.append(name)arrays.append(arrow_array_from_numpy_array(array))return pyarrow.Table.from_arrays(arrays, names)", "docstring": "Implementation of Dataset.to_arrow_table", "id": "f7030:m4"} {"signature": "def _export_table(dataset, column_names=None, byteorder=\"\", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True):", "body": "column_names = column_names or dataset.get_column_names(virtual=virtual, strings=True)for name in column_names:if name not in dataset.columns:warnings.warn('')N = len(dataset) if not selection else dataset.selected_length(selection)if N == :raise ValueError(\"\")if shuffle and sort:raise ValueError(\"\")if shuffle:random_index_column = \"\"while random_index_column in dataset.get_column_names():random_index_column += \"\"partial_shuffle = shuffle and len(dataset) != Norder_array = Noneif partial_shuffle:shuffle_array_full = np.random.choice(len(dataset), len(dataset), replace=False)shuffle_array = shuffle_array_full[shuffle_array_full < N]del shuffle_array_fullorder_array = shuffle_arrayelif shuffle:shuffle_array = np.random.choice(N, N, replace=False)order_array = shuffle_arrayif sort:if selection:raise ValueError(\"\")logger.info(\"\")indices = np.argsort(dataset.evaluate(sort))order_array = indices if ascending else indices[::-]logger.info(\"\")if selection:full_mask = dataset.evaluate_selection_mask(selection)else:full_mask = Nonearrow_arrays = []for column_name in column_names:mask = full_maskif selection:values = dataset.evaluate(column_name, filtered=False)values = values[mask]else:values = dataset.evaluate(column_name)if shuffle or sort:indices = order_arrayvalues = values[indices]arrow_arrays.append(arrow_array_from_numpy_array(values))if shuffle:arrow_arrays.append(arrow_array_from_numpy_array(order_array))column_names = column_names + [random_index_column]table = pa.Table.from_arrays(arrow_arrays, column_names)return table", "docstring": ":param DatasetLocal dataset: dataset to export\n:param str path: path for file\n:param lis[str] column_names: list of column names to export or None for all columns\n:param str byteorder: = for native, < for little endian and > for big endian\n:param bool shuffle: export rows in random order\n:param bool selection: export selection or not\n:param progress: progress callback that gets a progress fraction as argument and should return True to continue,\n or a default progress bar when progress=True\n:param: bool virtual: When True, export virtual columns\n:return:", "id": "f7031:m2"} {"signature": "def _main(argv):", "body": "parser = argparse.ArgumentParser(description=DESCRIPTION,formatter_class=argparse.RawDescriptionHelpFormatter,)parser.add_argument('','',default=URL_BASE,help='' % URL_BASE,)parser.add_argument('','',default=None,help='',)parser.add_argument('','',default='',help='',)parser.add_argument('', '', help=\"\")parser.add_argument('','',action='',help='',)parser.add_argument('',help='',)parser.add_argument('',nargs='',default=None,help='',)args = parser.parse_args(argv)if args.save_token:if not args.token:raise ValueError(\"\")LuminosoClient.save_token(args.token,domain=urlparse(args.base_url).netloc)client = LuminosoClient.connect(url=args.base_url, token=args.token)name = args.project_nameif name is None:name = input('')if not name:print('')returnresult = 
upload_docs(client,args.input_filename,args.language,name,account=args.account_id,progress=True,)print(''.format(result[''], result['']))", "docstring": "Handle arguments for the 'lumi-upload' command.", "id": "f7054:m5"} {"signature": "def _simplify_doc(doc):", "body": "doc = dict(doc)if '' not in doc:raise ValueError(\"\".format(doc))return {'': doc[''],'': doc.get('', []),'': doc.get('', '')}", "docstring": "Limit a document to just the three fields we should upload.", "id": "f7054:m1"} {"signature": "def transcode_to_stream(input_filename, date_format=None):", "body": "tmp = tempfile.TemporaryFile()for entry in open_json_or_csv_somehow(input_filename,date_format=date_format):tmp.write(json.dumps(entry, ensure_ascii=False).encode(''))tmp.write(b'')tmp.seek()return tmp", "docstring": "Read a JSON or CSV file and convert it into a JSON stream, which will\nbe saved in an anonymous temp file.", "id": "f7056:m1"} {"signature": "def _convert_date(date_string, date_format):", "body": "if date_format != '':return datetime.strptime(date_string, date_format).timestamp()else:return float(date_string)", "docstring": "Convert a date in a given format to epoch time. Mostly a wrapper for\ndatetime's strptime.", "id": "f7056:m4"} {"signature": "def detect_file_encoding(filename):", "body": "with open(filename, '') as opened:sample = opened.read( ** )_, encoding = ftfy.guess_bytes(sample)return encoding", "docstring": "Use ftfy to detect the encoding of a file, based on a sample of its\nfirst megabyte.\n\nftfy's encoding detector is limited. The only encodings it can detect are\nUTF-8, CESU-8, UTF-16, Windows-1252, and occasionally MacRoman. But it\ndoes much better than chardet.", "id": "f7056:m5"} {"signature": "def transcode(input_filename, output_filename=None, date_format=None):", "body": "if output_filename is None:output = sys.stdoutelse:if output_filename.endswith(''):logger.warning(\"\"\"\"\"\")output_filename += ''output = open(output_filename, '')for entry in open_json_or_csv_somehow(input_filename,date_format=date_format):output.write(json.dumps(entry, ensure_ascii=False).encode(''))output.write('')output.close()", "docstring": "Convert a JSON or CSV file of input to a JSON stream (.jsons). 
This\nkind of file can be easily uploaded using `luminoso_api.upload`.", "id": "f7056:m0"} {"signature": "def open_csv_somehow(filename):", "body": "if PY3:return open_csv_somehow_py3(filename)else:return open_csv_somehow_py2(filename)", "docstring": "Given a filename that we're told is a CSV file, detect its encoding,\nparse its header, and return a generator yielding its rows as dictionaries.", "id": "f7056:m7"} {"signature": "def open_json_or_csv_somehow(filename, date_format=None):", "body": "fileformat = Noneif filename.endswith(''):fileformat = ''elif filename.endswith(''):fileformat = ''else:with open(filename) as opened:line = opened.readline()if line[] not in '' and not filename.endswith(''):fileformat = ''else:if (line.count('') == line.count('') andline.count('') == line.count('')):char = ''while char.isspace():char = opened.read()if char == '':fileformat = ''breakif fileformat is None:fileformat = ''else:fileformat = ''if fileformat == '':stream = json.load(open(filename), encoding='')elif fileformat == '':stream = open_csv_somehow(filename)else:stream = stream_json_lines(filename)return _normalize_data(stream, date_format=date_format)", "docstring": "Deduce the format of a file, within reason.\n\n- If the filename ends with .csv or .txt, it's csv.\n- If the filename ends with .jsons, it's a JSON stream (conveniently the\n format we want to output).\n- If the filename ends with .json, it could be a legitimate JSON file, or\n it could be a JSON stream, following a nonstandard convention that many\n people including us are guilty of. In that case:\n - If the first line is a complete JSON document, and there is more in the\n file besides the first line, then it is a JSON stream.\n - Otherwise, it is probably really JSON.\n- If the filename does not end with .json, .jsons, or .csv, we have to guess\n whether it's still CSV or tab-separated values or something like that.\n If it's JSON, the first character would almost certainly have to be a\n bracket or a brace. If it isn't, assume it's CSV or similar.", "id": "f7056:m2"} {"signature": "def _json_request(self, req_type, url, **kwargs):", "body": "response = self._request(req_type, url, **kwargs)try:json_response = response.json()except ValueError:logger.error(\"\" %(response, response.content))raise LuminosoError('')return json_response", "docstring": "Make a request of the specified type and expect a JSON object in\nresponse.\n\nIf the result has an 'error' value, raise a LuminosoAPIError with\nits contents. 
Otherwise, return the contents of the 'result' value.", "id": "f7058:c0:m6"} {"signature": "def delete(self, path='', **params):", "body": "params = jsonify_parameters(params)url = ensure_trailing_slash(self.url + path.lstrip(''))return self._json_request('', url, params=params)", "docstring": "Make a DELETE request to the given path, and return the JSON-decoded\nresult.\n\nKeyword parameters will be converted to URL parameters.\n\nDELETE requests ask to delete the object represented by this URL.", "id": "f7058:c0:m11"} {"signature": "def __init__(self, session, url):", "body": "self.session = sessionself.url = ensure_trailing_slash(url)self.root_url = get_root_url(url, warn=False)", "docstring": "Create a LuminosoClient given an existing Session object that has a\n_TokenAuth object as its .auth attribute.\n\nIt is probably easier to call LuminosoClient.connect() to handle\nthe authentication for you.", "id": "f7058:c0:m0"} {"signature": "def client_for_path(self, path):", "body": "if path.startswith(''):url = self.root_url + pathelse:url = self.url + pathreturn self.__class__(self.session, url)", "docstring": "Returns a new client with the same root URL and authentication, but\na different specific URL. For instance, if you have a client pointed\nat https://analytics.luminoso.com/api/v5/, and you want new ones for\nProject A and Project B, you would call:\n\n client_a = client.client_for_path('projects/')\n client_b = client.client_for_path('projects/')\n\nand your base client would remain unchanged.\n\nPaths with leading slashes are appended to the root url; otherwise,\npaths are set relative to the current path.", "id": "f7058:c0:m12"} {"signature": "def upload_file(filename, server, account, projname, language=None,username=None, password=None,append=False, stage=False, date_format=None):", "body": "stream = transcode_to_stream(filename, date_format)upload_stream(stream_json_lines(stream),server, account, projname, language=language,username=username, password=password,append=append, stage=stage)", "docstring": "Upload a file to Luminoso with the given account and project name.\n\nGiven a file containing JSON, JSON stream, or CSV data, this verifies\nthat we can successfully convert it to a JSON stream, then uploads that\nJSON stream.", "id": "f7060:m2"} {"signature": "def upload_stream(stream, server, account, projname, language=None,username=None, password=None,append=False, stage=False):", "body": "client = LuminosoClient.connect(server,username=username, password=password)if not append:info = client.post('' + account, name=projname)project_id = info['']print('', project_id)else:projects = client.get('' + account, name=projname)if len(projects) == :print('')returnif len(projects) > :print('' % projname,end='')project_id = projects[]['']print('' % project_id)project = client.change_path('' + account + '' + project_id)counter = for batch in batches(stream, ):counter += documents = list(batch)project.upload('', documents)print('' % (counter))if not stage:print('')kwargs = {}if language is not None:kwargs = {'': language}job_id = project.post('', **kwargs)project.wait_for(job_id)", "docstring": "Given a file-like object containing a JSON stream, upload it to\nLuminoso with the given account name and project name.", "id": "f7060:m1"} {"signature": "def post(self, path='', **params):", "body": "params = jsonify_parameters(params)url = ensure_trailing_slash(self.url + path.lstrip(''))return self._json_request('', url, data=params)", "docstring": "Make a POST request to the given path, and 
return the JSON-decoded\nresult.\n\nKeyword parameters will be converted to form values, sent in the body\nof the POST.\n\nPOST requests are requests that cause a change on the server,\nespecially those that ask to create and return an object of some kind.", "id": "f7061:c0:m8"} {"signature": "def get_raw(self, path, **params):", "body": "url = ensure_trailing_slash(self.url + path.lstrip(''))return self._request('', url, params=params).text", "docstring": "Get the raw text of a response.\n\nThis is only generally useful for specific URLs, such as documentation.", "id": "f7061:c0:m20"} {"signature": "def put(self, path='', **params):", "body": "params = jsonify_parameters(params)url = ensure_trailing_slash(self.url + path.lstrip(''))return self._json_request('', url, data=params)", "docstring": "Make a PUT request to the given path, and return the JSON-decoded\nresult.\n\nKeyword parameters will be converted to form values, sent in the body\nof the PUT.\n\nPUT requests are usually requests to *update* the object represented by\nthis URL. Unlike POST requests, PUT requests can be safely duplicated.", "id": "f7061:c0:m9"} {"signature": "def post_data(self, path, data, content_type, **params):", "body": "params = jsonify_parameters(params)url = ensure_trailing_slash(self.url + path.lstrip(''))return self._json_request('', url,params=params,data=data,headers={'': content_type})", "docstring": "Make a POST request to the given path, with `data` in its body.\nReturn the JSON-decoded result.\n\nThe content_type must be set to reflect the kind of data being sent,\nwhich is often `application/json`.\n\nKeyword parameters will be converted to URL parameters. This is unlike\nother POST requests which encode those parameters in the body, because\nthe body is already being used.\n\nThis is used by the Luminoso API to upload new documents in JSON\nformat.", "id": "f7061:c0:m12"} {"signature": "@classmethoddef connect(cls, url=None, username=None, password=None, token=None,token_file=None):", "body": "auto_account = Falseif url is None:auto_account = Trueurl = ''if url.startswith(''):root_url = get_root_url(url)else:url = URL_BASE + '' + url.lstrip('')root_url = URL_BASEauth = cls._get_token_auth(username, password, token, token_file,root_url)session = requests.session()session.auth = authclient = cls(session, url)if auto_account:client = client.change_path('' %client._get_default_account())return client", "docstring": "Returns an object that makes requests to the API, authenticated\nwith the provided username/password, at URLs beginning with `url`.\n\nYou can leave out the URL and get your 'default URL', a base path\nthat is probably appropriate for creating projects on your\naccount:\n\n client = LuminosoClient.connect(username=username)\n\nIf the URL is simply a path, omitting the scheme and domain, then\nit will default to https://analytics.luminoso.com/api/v4/, which is\nprobably what you want:\n\n client = LuminosoClient.connect('/projects/public', username=username)\n\nIf you leave out the username, it will use your system username,\nwhich is convenient if it matches your Luminoso username:\n\n client = LuminosoClient.connect()", "id": "f7061:c0:m2"} {"signature": "def get(self, path='', **params):", "body": "params = jsonify_parameters(params)url = ensure_trailing_slash(self.url + path.lstrip(''))return self._json_request('', url, params=params)", "docstring": "Make a GET request to the given path, and return the JSON-decoded\nresult.\n\nKeyword parameters will be converted to URL 
parameters.\n\nGET requests are requests that retrieve information without changing\nanything on the server.", "id": "f7061:c0:m7"} {"signature": "def __call__(self, request):", "body": "request.headers[''] = '' + self.tokenreturn request", "docstring": "Add an authorization header containing the token.", "id": "f7063:c0:m1"} {"signature": "@classmethoddef from_user_creds(cls, username, password, url=URL_BASE):", "body": "session = requests.session()token_resp = session.post(url.rstrip('') + '',data={'': username,'': password})if token_resp.status_code != :error = token_resp.texttry:error = json.loads(error)['']except (KeyError, ValueError):passraise LuminosoLoginError(error)return cls(token_resp.json()[''][''])", "docstring": "Obtain a short-lived token using a username and password, and use that\ntoken to create an auth object.", "id": "f7063:c0:m2"} {"signature": "def _print_csv(result):", "body": "if type(result) is not list:raise TypeError(\"\")first_line = result[]w = csv.DictWriter(sys.stdout, fieldnames=sorted(first_line.keys()))w.writeheader()for line in result:w.writerow(line)", "docstring": "Print a JSON list of JSON objects in CSV format.", "id": "f7064:m0"} {"signature": "def _read_params(input_file, json_body, p_params):", "body": "params = {}try:if input_file:params.update(json.load(input_file))if json_body is not None:params.update(json.loads(json_body))except ValueError as e:raise ValueError(\"\" % e)try:params.update({p.split('', )[]: p.split('', )[] for p in p_params})except IndexError:raise ValueError(\"\")return params", "docstring": "Read parameters from input file, -j, and -p arguments, in that order.", "id": "f7064:m1"} {"signature": "def _mkdir_p(path):", "body": "try:os.makedirs(path)except OSError as exc:if exc.errno == errno.EEXIST and os.path.isdir(path):passelse:raiseelse:logger.info(\"\", path, os.path.sep)", "docstring": "mkdir -p path", "id": "f7065:m7"} {"signature": "def generate_controller(args):", "body": "controller_template = os.path.join(dirname(abspath(__file__)), '')test_template = os.path.join(dirname(abspath(__file__)), '')controller_name = args.get('')current_path = os.getcwd()logger.info('')if not controller_name:logger.warning('')returnwith open(controller_template, '') as template_file:controller_file_path = os.path.join(current_path, '',controller_name + '')with open(controller_file_path, '') as controller_file:for line in template_file:new_line = line.replace('', controller_name)controller_file.write(new_line)logger.info(\"\" % _relative_path(controller_file_path))with open(test_template, '') as template_file:test_file_path = os.path.join(current_path, '','' % controller_name)with open(test_file_path, '') as test_file:for line in template_file:new_line = line.replace('', controller_name).replace('', controller_name.title())test_file.write(new_line)logger.info(\"\" % _relative_path(test_file_path))assets_dir_path = os.path.join(current_path, '' % controller_name)_mkdir_p(assets_dir_path)_generate_form(controller_name)logger.info('')", "docstring": "Generate controller, include the controller file, template & css & js directories.", "id": "f7065:m1"} {"signature": "@bp.route('')def index():", "body": "return render_template('')", "docstring": "Index page.", "id": "f7079:m0"} {"signature": "@bp.route('')def about():", "body": "return render_template('')", "docstring": "About page.", "id": "f7079:m1"} {"signature": "@bp.route('', methods=['', ''])@VisitorPermission()def signup():", "body": "form = SignupForm()if form.validate_on_submit():params 
= form.data.copy()params.pop('')user = User(**params)db.session.add(user)db.session.commit()signin_user(user)return redirect(url_for(''))return render_template('', form=form)", "docstring": "Signup", "id": "f7080:m1"} {"signature": "@bp.route('')def signout():", "body": "signout_user()return redirect(request.referrer or url_for(''))", "docstring": "Signout", "id": "f7080:m2"} {"signature": "def encode(something):", "body": "secret_key = current_app.config.get('')s = URLSafeSerializer(secret_key)return s.dumps(something)", "docstring": "Encode something with SECRET_KEY.", "id": "f7086:m0"} {"signature": "def absolute_url_for(endpoint, **values):", "body": "config = current_app.configsite_domain = config.get('')relative_url = url_for(endpoint, **values)return join_url(site_domain, relative_url)", "docstring": "Absolute url for endpoint.", "id": "f7088:m0"} {"signature": "def get_current_user():", "body": "if not '' in session:return Noneuser = User.query.filter(User.id == session['']).first()if not user:signout_user()return Nonereturn user", "docstring": "Get current user.", "id": "f7089:m2"} {"signature": "def register_error_handle(app):", "body": "@app.errorhandler()def page_403(error):return render_template(''), @app.errorhandler()def page_404(error):return render_template(''), @app.errorhandler()def page_500(error):return render_template(''), ", "docstring": "Register HTTP error pages.", "id": "f7095:m4"} {"signature": "def create_app():", "body": "config = load_config()app = Flask(__name__)app.config.from_object(config)app.wsgi_app = ProxyFix(app.wsgi_app)CsrfProtect(app)if app.debug or app.testing:DebugToolbarExtension(app)app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'': os.path.join(app.config.get(''), '')})else:app.logger.addHandler(logging.StreamHandler())app.logger.setLevel(logging.ERROR)if app.config.get(''):from .utils.sentry import sentrysentry.init_app(app, dsn=app.config.get(''))app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'': os.path.join(app.config.get(''), ''),'': os.path.join(app.config.get(''), ''),'': os.path.join(app.config.get(''), '')})register_db(app)register_routes(app)register_jinja(app)register_error_handle(app)register_hooks(app)return app", "docstring": "Create Flask app.", "id": "f7095:m0"} {"signature": "def register_routes(app):", "body": "from . 
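The encode entry above (f7086:m0) signs a value with the application's secret key through a URLSafeSerializer, presumably the one from itsdangerous; the entry's imports, config key, and literals are stripped, so that library choice is an assumption. Outside a Flask request context the round trip looks roughly like this, with a hypothetical key:

from itsdangerous import URLSafeSerializer

SECRET_KEY = "change-me"   # hypothetical stand-in for the Flask config value

def encode(something):
    return URLSafeSerializer(SECRET_KEY).dumps(something)

def decode(token):
    return URLSafeSerializer(SECRET_KEY).loads(token)

token = encode({"user_id": 42})
assert decode(token) == {"user_id": 42}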
import controllersfrom flask.blueprints import Blueprintfor module in _import_submodules_from_package(controllers):bp = getattr(module, '')if bp and isinstance(bp, Blueprint):app.register_blueprint(bp)", "docstring": "Register routes.", "id": "f7095:m3"} {"signature": "@manager.commanddef live():", "body": "from livereload import Serverserver = Server(app)map(server.watch, glob2.glob(\"\")) map(server.watch, glob2.glob(\"\")) map(server.watch, glob2.glob(\"\")) server.serve(port=PORT)", "docstring": "Run livereload server", "id": "f7097:m1"} {"signature": "@manager.commanddef build():", "body": "os.system('')os.chdir('')os.system('')", "docstring": "Use FIS to compile assets.", "id": "f7097:m2"} {"signature": "def setUp(self):", "body": "pass", "docstring": "Called before every test.", "id": "f7099:c0:m0"} {"signature": "def tearDown(self):", "body": "pass", "docstring": "Called after every test.", "id": "f7099:c0:m1"} {"signature": "def __init__(self, host, port, loop, monitored_zones=[],monitored_outputs=[], partitions=[]):", "body": "self._host = hostself._port = portself._loop = loopself._message_handlers = {}self._monitored_zones = monitored_zonesself.violated_zones = []self._monitored_outputs = monitored_outputsself.violated_outputs = []self.partition_states = {}self._keep_alive_timeout = self._reconnection_timeout = self._reader = Noneself._writer = Noneself.closed = Falseself._alarm_status_callback = Noneself._zone_changed_callback = Noneself._output_changed_callback = Noneself._partitions = partitionsself._command_status_event = asyncio.Event()self._command_status = Falseself._message_handlers[b''] = self._zone_violatedself._message_handlers[b''] = self._output_changedself._message_handlers[b''] = lambda msg: self._armed(AlarmState.ARMED_MODE0, msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.ARMED_MODE1, msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.ARMED_MODE2, msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.ARMED_MODE3, msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.ARMED_SUPPRESSED, msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.ENTRY_TIME, msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.EXIT_COUNTDOWN_OVER_10, msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.EXIT_COUNTDOWN_UNDER_10, msg)self._message_handlers[b''] = lambda msg: self._command_result(msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.TRIGGERED, msg)self._message_handlers[b''] = lambda msg: self._armed(AlarmState.TRIGGERED_FIRE, msg)", "docstring": "Init the Satel alarm data.", "id": "f7103:c1:m0"} {"signature": "def generate_query(command):", "body": "data = bytearray(command)c = checksum(data)data.append(c >> )data.append(c & )data.replace(b'', b'')data = bytearray.fromhex(\"\") + data + bytearray.fromhex(\"\")return data", "docstring": "Add header, checksum and footer to command data.", "id": "f7103:m4"} {"signature": "async def disarm(self, code, partition_list):", "body": "_LOGGER.info(\"\")while len(code) < :code += ''code_bytes = bytearray.fromhex(code)data = generate_query(b'' + code_bytes+ partition_bytes(partition_list))await self._send_data(data)", "docstring": "Send command to disarm.", "id": "f7103:c1:m9"} {"signature": "async def clear_alarm(self, code, partition_list):", "body": "_LOGGER.info(\"\")while len(code) < :code += ''code_bytes = bytearray.fromhex(code)data = generate_query(b'' + code_bytes+ 
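register_routes (f7095:m3, whose body closes above) walks a controllers package and registers every module-level Blueprint it finds on the Flask app. A rough standalone equivalent using pkgutil; the attribute name bp is an assumption, since the getattr argument is stripped in the entry, although the controller entries earlier do define a module-level bp:

import importlib
import pkgutil

from flask import Blueprint

def register_routes(app, package):
    # Import each submodule and register any Blueprint exposed as ``bp``.
    for info in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(f"{package.__name__}.{info.name}")
        bp = getattr(module, "bp", None)
        if isinstance(bp, Blueprint):
            app.register_blueprint(bp)

# Hypothetical usage inside an app factory:
# register_routes(app, controllers)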
partition_bytes(partition_list))await self._send_data(data)", "docstring": "Send command to clear the alarm.", "id": "f7103:c1:m10"} {"signature": "async def start_monitoring(self):", "body": "data = generate_query(b'')await self._send_data(data)resp = await self._read_data()if resp is None:_LOGGER.warning(\"\")returnif resp[:] != b'':_LOGGER.warning(\"\")", "docstring": "Start monitoring for interesting events.", "id": "f7103:c1:m3"} {"signature": "def _output_changed(self, msg):", "body": "status = {\"\": {}}output_states = list_set_bits(msg, )self.violated_outputs = output_states_LOGGER.debug(\"\",output_states, self._monitored_outputs)for output in self._monitored_outputs:status[\"\"][output] = if output in output_states else _LOGGER.debug(\"\", status)if self._output_changed_callback:self._output_changed_callback(status)return status", "docstring": "0x17 outputs state 0x17 + 16/32 bytes", "id": "f7103:c1:m5"} {"signature": "def validate(validator):", "body": "def decorator(func):\"\"\"\"\"\"@wraps(func)def wrapper(image, size, validate=True):if validate:validator(image, size)return func(image, size)return wrapperreturn decorator", "docstring": "Return a decorator that validates arguments with provided `validator`\nfunction.\n\nThis will also store the validator function as `func.validate`.\nThe decorator returned by this function, can bypass the validator\nif `validate=False` is passed as argument otherwise the fucntion is\ncalled directly.\n\nThe validator must raise an exception, if the function can not\nbe called.", "id": "f7107:m0"} {"signature": "def url_to_image(url):", "body": "r = requests.get(url)image = StringIO(r.content)return image", "docstring": "Fetch an image from url and convert it into a Pillow Image object", "id": "f7108:m1"} {"signature": "def string_to_image(image_string):", "body": "image_filelike = StringIO(image_string)image = Image.open(image_filelike)return image", "docstring": "Convert string datas into a Pillow Image object", "id": "f7108:m2"} {"signature": "def file_to_image(image_file_name):", "body": "with open(image_file_name, '') as f_image:return Image.open(f_image)", "docstring": "Convert a file into a Pillow Image object", "id": "f7108:m0"} {"signature": "@taskdef clean():", "body": "rm(\"\")", "docstring": "clean up the crap left behind by setup.py build", "id": "f7118:m0"} {"signature": "@taskdef reinstall():", "body": "pip(\"\")install()", "docstring": "uninstall and reinstall pub; probably only useful for developers", "id": "f7118:m3"} {"signature": "@taskdef build():", "body": "python(\"\")", "docstring": "build pub", "id": "f7118:m1"} {"signature": "def is_robot(user_agent):", "body": "return _match_useragent(user_agent, '')", "docstring": "Determine if user agent is a robot/crawler/spider.\n\n Determined according to the *Code of Practice for Research Data*.", "id": "f7129:m4"} {"signature": "def memoize(func):", "body": "cache = {}@wraps(func)def inner(filename):if filename not in cache:cache[filename] = func(filename)return cache[filename]return inner", "docstring": "Cache result of function call.", "id": "f7129:m1"} {"signature": "def _change_value(self, key, value):", "body": "if not self.config.has_section(key[]):self.config.add_section(key[])self.config.set(key[], key[], str(value))with open(self.configfile, \"\") as f:self.config.write(f)", "docstring": "Change the value of the given key in the given file to the given value", "id": "f7138:c1:m4"} {"signature": "def _migrate_config(self, oldname=DEFAULT_CONFIG, 
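The three image helpers above (f7108:m0, m1, m2) turn a file, an HTTP response, or a raw string into a Pillow Image via StringIO, which only handles binary data on Python 2. On Python 3 the same conversions go through io.BytesIO; a hedged sketch:

import io

import requests
from PIL import Image

def file_to_image(path):
    # Image.open accepts a path directly; load() reads the data before the file closes.
    image = Image.open(path)
    image.load()
    return image

def url_to_image(url):
    # Wrap the downloaded bytes in an in-memory binary buffer.
    response = requests.get(url)
    return Image.open(io.BytesIO(response.content))

def bytes_to_image(data):
    return Image.open(io.BytesIO(data))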
newname=DEFAULT_CONFIG):", "body": "self._log(\"\"\"\", logging.WARNING)with open(oldname, \"\") as old:with open(newname, \"\") as new:new.write(\"\")new.write(old.read())", "docstring": "Migrates the old config file format to the new one", "id": "f7138:c1:m5"} {"signature": "def _get_value(self, key, func=None, split_val=None, as_boolean=False,exception_default=None):", "body": "try:if as_boolean:return self.config.getboolean(key[], key[])value = self.config.get(key[], key[])if split_val is not None:value = value.split(split_val)if func is not None:return func(value)return valueexcept (KeyError, configparser.NoSectionError, configparser.NoOptionError) as e:if exception_default is not None:return exception_defaultraise KeyError(e)", "docstring": "Helper method to get a value from the config", "id": "f7138:c1:m3"} {"signature": "def _start_webserver(self, authorize_url=None):", "body": "server_address = (SERVER_URL, SERVER_PORT)self.server = HTTPServer(server_address, OAuth2UtilRequestHandler)self.server.response_code = Noneself.server.authorize_url = authorize_urlt = Thread(target=self.server.serve_forever)t.daemon = Truet.start()", "docstring": "Start the webserver that will receive the code", "id": "f7138:c1:m6"} {"signature": "def _set_app_info(self):", "body": "redirect_url = \"\".format(SERVER_URL, SERVER_PORT,SERVER_REDIRECT_PATH)self.r.set_oauth_app_info(self._get_value(CONFIGKEY_APP_KEY),self._get_value(CONFIGKEY_APP_SECRET),redirect_url)", "docstring": "Set the app info (id & secret) read from the config file on the Reddit object", "id": "f7138:c1:m2"} {"signature": "@propertydef tls_client(self):", "body": "if self.tls_cert and self.tls_key:return (self.tls_cert, self.tls_key)return None", "docstring": "A tuple consisting of the TLS client certificate and key if they\n have been provided, otherwise None.", "id": "f7145:c1:m2"} {"signature": "def pad(data_to_pad, block_size, style=''):", "body": "padding_len = block_size-len(data_to_pad)%block_sizeif style == '':padding = bchr(padding_len)*padding_lenelif style == '':padding = bchr()*(padding_len-) + bchr(padding_len)elif style == '':padding = bchr() + bchr()*(padding_len-)else:raise ValueError(\"\")return data_to_pad + padding", "docstring": "Apply standard padding.\n\n :Parameters:\n data_to_pad : byte string\n The data that needs to be padded.\n block_size : integer\n The block boundary to use for padding. The output length is guaranteed\n to be a multiple of ``block_size``.\n style : string\n Padding algorithm. 
It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*.\n :Return:\n The original data with the appropriate padding added at the end.", "id": "f7155:m0"} {"signature": "def __mul__(self, rhs):", "body": "if isinstance(rhs, scipy.sparse.spmatrix):def qIter(qs):for j in range(qs.shape[]):qi = qs.getcol(j).toarray().ravel()yield qireturnelse:def qIter(qs):for j in range(qs.shape[]):qi = qs[:, j]yield qireturnresult = np.empty(rhs.shape, dtype=np.complex128)for i, q in enumerate(qIter(rhs)):result[:, i] = self._solve(q)return result", "docstring": "Carries out the action of solving for wavefields.\n\nArgs:\n rhs (sparse matrix): Right-hand side vector(s)\n\nReturns:\n np.ndarray: Wavefields", "id": "f7165:c0:m5"} {"signature": "@propertydef Ainv(self):", "body": "if getattr(self, '', None) is None:self._Ainv = self.Solver(self.A, )self._Ainv.run_pardiso()return self._Ainv", "docstring": "Returns a Solver instance", "id": "f7165:c2:m0"} {"signature": "def params_matching(layers, patterns):", "body": "if isinstance(patterns, basestring):patterns = (patterns, )for layer in layers:for param in layer.params:name = param.namefor pattern in patterns:if fnmatch.fnmatch(name, pattern):yield name, parambreak", "docstring": "Get the parameters from a network that match a pattern.\n\n Parameters\n ----------\n layers : list of :class:`theanets.layers.Layer`\n A list of network layers to retrieve parameters from.\n patterns : sequence of str\n A sequence of glob-style patterns to match against. Any parameter\n matching any pattern in this sequence will be included in the match.\n\n Yields\n ------\n matches : pair of str, theano expression\n Generates a sequence of (name, expression) pairs. The name is the name\n of the parameter that matched, and the expression represents the\n parameter symbolically.", "id": "f7167:m3"} {"signature": "def _find_output(self, layer):", "body": "if layer is None:layer = len(self.layers) // if isinstance(layer, int):layer = self.layers[layer]if isinstance(layer, util.basestring):try:layer = [l for l in self.layers if l.name == layer][]except IndexError:passif isinstance(layer, layers.Layer):layer = layer.output_namereturn layer", "docstring": "Find a layer output name for the given layer specifier.\n\n Parameters\n ----------\n layer : None, int, str, or :class:`theanets.layers.Layer`\n A layer specification. If this is None, the \"middle\" layer in the\n network will be used (i.e., the layer at the middle index in the\n list of network layers). If this is an integer, the corresponding\n layer in the network's layer list will be used. If this is a string,\n the layer with the corresponding name will be returned.\n\n Returns\n -------\n name : str\n The fully-scoped output name for the desired layer.", "id": "f7168:c0:m3"} {"signature": "def predict_proba(self, x, **kwargs):", "body": "return self.feed_forward(x, **kwargs)[self.layers[-].output_name]", "docstring": "Compute class posterior probabilities for the given set of data.\n\n Parameters\n ----------\n x : ndarray (num-examples, num-variables)\n An array containing examples to predict. 
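pad (f7155:m0, whose docstring ends above) implements the pkcs7, iso7816 and x923 block-padding schemes; the padding length is always block_size - len(data) % block_size, so a full extra block is appended when the input is already aligned. A minimal PKCS#7-only sketch with its inverse, assuming bytes input on Python 3:

def pkcs7_pad(data, block_size):
    padding_len = block_size - len(data) % block_size   # 1..block_size bytes
    return data + bytes([padding_len]) * padding_len

def pkcs7_unpad(data, block_size):
    padding_len = data[-1]
    if not 1 <= padding_len <= block_size or data[-padding_len:] != bytes([padding_len]) * padding_len:
        raise ValueError("invalid PKCS#7 padding")
    return data[:-padding_len]

assert pkcs7_pad(b"YELLOW SUBMARINE", 16).endswith(b"\x10" * 16)   # aligned input gets a full block
assert pkcs7_unpad(pkcs7_pad(b"abc", 8), 8) == b"abc"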
Examples are given as the\n rows in this array.\n\n Returns\n -------\n p : ndarray (num-examples, num-classes)\n An array of class posterior probability values, one per row of input\n data.", "id": "f7168:c2:m4"} {"signature": "def monitors(self, **kwargs):", "body": "monitors = super(Classifier, self).monitors(**kwargs)regs = regularizers.from_kwargs(self, **kwargs)outputs, _ = self.build_graph(regs)return monitors + [('', self.losses[].accuracy(outputs))]", "docstring": "Return expressions that should be computed to monitor training.\n\n Returns\n -------\n monitors : list of (name, expression) pairs\n A list of named monitor expressions to compute for this network.", "id": "f7168:c2:m1"} {"signature": "def predict_logit(self, x, **kwargs):", "body": "return self.feed_forward(x, **kwargs)[self.layers[-].full_name('')]", "docstring": "Compute the logit values that underlie the softmax output.\n\n Parameters\n ----------\n x : ndarray (num-examples, num-variables)\n An array containing examples to classify. Examples are given as the\n rows in this array.\n\n Returns\n -------\n l : ndarray (num-examples, num-classes)\n An array of posterior class logit values, one row of logit values\n per row of input data.", "id": "f7168:c2:m5"} {"signature": "def log(self):", "body": "util.log('''', self)", "docstring": "Log some diagnostic info about this regularizer.", "id": "f7169:c7:m1"} {"signature": "def loss(self, layers, outputs):", "body": "return ", "docstring": "Compute a scalar term to add to the loss function for a model.\n\n Parameters\n ----------\n layers : list of :class:`theanets.layers.Layer`\n A list of the layers in the model being regularized.\n outputs : dict of Theano expressions\n A dictionary mapping string expression names to their corresponding\n Theano expressions in the computation graph. This dictionary\n contains the fully-scoped name of every layer output in the graph.", "id": "f7169:c0:m3"} {"signature": "def modify_graph(self, outputs):", "body": "pass", "docstring": "Modify the outputs of a particular layer in the computation graph.\n\n Parameters\n ----------\n outputs : dict of Theano expressions\n A map from string output names to the corresponding Theano\n expression. This dictionary contains the fully-scoped name of all\n outputs from a single layer in the computation graph.\n\n This map is mutable, so any changes that the regularizer makes will\n be retained when the caller regains control.\n\n Notes\n -----\n\n This method is applied during graph-construction time to change the\n behavior of one or more layer outputs. For example, the\n :class:`BernoulliDropout` class replaces matching outputs with an\n expression containing \"masked\" outputs, where some elements are randomly\n set to zero each time the expression is evaluated.\n\n Any regularizer that needs to modify the structure of the computation\n graph should implement this method.", "id": "f7169:c0:m2"} {"signature": "def itertrain(self, train, valid=None, **kwargs):", "body": "from . 
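predict_proba and predict_logit above (f7168:c2:m4 and m5) are two views of the same classifier head: the logits are the pre-softmax activations and the posterior probabilities are their softmax. The relationship in plain numpy (this is only the arithmetic, not theanets code):

import numpy as np

def softmax(logits):
    # Shift by the row max for numerical stability, then normalize.
    z = logits - logits.max(axis=-1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)

logits = np.array([[2.0, 1.0, 0.1]])        # one example, three classes
proba = softmax(logits)                     # rows sum to 1
assert np.isclose(proba.sum(axis=-1), 1.0).all()
predicted_class = proba.argmax(axis=-1)     # index of the most probable class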
import feedforwardoriginal_layer_names = set(l.name for l in self.network.layers[:-])layers_ = list(l.to_spec() for l in self.network.layers[:-])for i, l in enumerate(layers_[::-][:-]):layers_.append(dict(form='', partner=l[''], activation=l['']))layers_.append(dict(form='', partner=layers_[][''], activation=''))util.log('')ae = feedforward.Autoencoder(layers=layers_)pre = SupervisedPretrainer(self.algo, ae)for monitors in pre.itertrain(train, valid, **kwargs):yield monitorsfor param in ae.params:l, p = param.name.split('')if l in original_layer_names:util.log('', param.name)self.network.find(l, p).set_value(param.get_value())util.log('')", "docstring": "Train a model using a training and validation set.\n\n This method yields a series of monitor values to the caller. After every\n iteration, a pair of monitor dictionaries is generated: one evaluated on\n the training dataset, and another evaluated on the validation dataset.\n The validation monitors might not be updated during every training\n iteration; in this case, the most recent validation monitors will be\n yielded along with the training monitors.\n\n Parameters\n ----------\n train : :class:`Dataset `\n A set of training data for computing updates to model parameters.\n valid : :class:`Dataset `\n A set of validation data for computing monitor values and\n determining when the loss has stopped improving.\n\n Yields\n ------\n training : dict\n A dictionary mapping monitor names to values, evaluated on the\n training dataset.\n validation : dict\n A dictionary containing monitor values evaluated on the validation\n dataset.", "id": "f7170:c3:m1"} {"signature": "def itertrain(self, train, valid=None, **kwargs):", "body": "net = self.networkoriginal = list(net.layers)output_name = original[-].output_nametied = any(isinstance(l, layers.Tied) for l in original)L = + len(original) // if tied else len(original) - for i in range(, L):tail = []if i == L - :net.layers = originalelif tied:net.layers = original[:i+]for j in range(i):prev = tail[-] if tail else net.layers[-]tail.append(layers.Layer.build('', partner=original[i-j].name, inputs=prev.name))net.layers = original[:i+] + tailelse:tail.append(layers.Layer.build('',name='',inputs=original[i].output_name,size=original[-].output_size,activation=original[-].kwargs['']))net.layers = original[:i+] + tailutil.log('',''.join(l.name for l in net.layers))[l.bind(net, initialize=False) for l in net.layers][l.setup() for l in tail]net.losses[].output_name = net.layers[-].output_nametrainer = DownhillTrainer(self.algo, net)for monitors in trainer.itertrain(train, valid, **kwargs):yield monitorsnet.layers = originalnet.losses[].output_name = output_name", "docstring": "Train a model using a training and validation set.\n\n This method yields a series of monitor values to the caller. 
After every\n iteration, a pair of monitor dictionaries is generated: one evaluated on\n the training dataset, and another evaluated on the validation dataset.\n The validation monitors might not be updated during every training\n iteration; in this case, the most recent validation monitors will be\n yielded along with the training monitors.\n\n Parameters\n ----------\n train : :class:`Dataset `\n A set of training data for computing updates to model parameters.\n valid : :class:`Dataset `\n A set of validation data for computing monitor values and\n determining when the loss has stopped improving.\n\n Yields\n ------\n training : dict\n A dictionary mapping monitor names to values, evaluated on the\n training dataset.\n validation : dict\n A dictionary containing monitor values evaluated on the validation\n dataset.", "id": "f7170:c2:m1"} {"signature": "@staticmethoddef reservoir(xs, n, rng):", "body": "pool = []for i, x in enumerate(xs):if len(pool) < n:pool.append(x / np.linalg.norm(x))continuej = rng.randint(i + )if j < n:pool[j] = x / np.linalg.norm(x)L = len(pool)S = np.std(pool, axis=)while len(pool) < n:x = pool[rng.randint(L)]pool.append(x + S * rng.randn(*x.shape))return np.array(pool, dtype=pool[].dtype)", "docstring": "Select a random sample of n items from xs.", "id": "f7170:c1:m0"} {"signature": "def __call__(self, x):", "body": "raise NotImplementedError", "docstring": "Compute a symbolic expression for this activation function.\n\n Parameters\n ----------\n x : Theano expression\n A Theano expression representing the input to this activation\n function.\n\n Returns\n -------\n y : Theano expression\n A Theano expression representing the output from this activation\n function.", "id": "f7171:c0:m1"} {"signature": "@propertydef input_name(self):", "body": "if len(self._input_shapes) != :raise util.ConfigurationError(''.format(self.name, self._input_shapes))return list(self._input_shapes)[]", "docstring": "Name of layer input (for layers with one input).", "id": "f7173:c0:m2"} {"signature": "def bind(self, graph, reset=True, initialize=True):", "body": "if reset:for k in self._input_shapes:self._input_shapes[k] = Nonefor k in self._output_shapes:self._output_shapes[k] = Noneself.resolve_inputs(graph.layers)self.resolve_outputs()self.activate = activations.build(self.kwargs.get('', ''), self)if initialize:self.setup()self.log()", "docstring": "Bind this layer into a computation graph.\n\n This method is a wrapper for performing common initialization tasks. 
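reservoir (f7170:c1:m0, above) draws n vectors uniformly from a stream of unknown length, normalizing each kept vector and, when the stream is shorter than n, padding the pool with noisy copies. The core of it is classic reservoir sampling, shown here on plain Python values with the standard random module:

import random

def reservoir_sample(stream, n, rng=random):
    pool = []
    for i, x in enumerate(stream):
        if len(pool) < n:
            pool.append(x)           # always keep the first n items
            continue
        j = rng.randint(0, i)        # uniform over 0..i inclusive
        if j < n:
            pool[j] = x              # replace a slot with probability n/(i+1)
    return pool

print(reservoir_sample(range(1000), 5))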
It\n calls :func:`resolve`, :func:`setup`, and :func:`log`.\n\n Parameters\n ----------\n graph : :class:`Network `\n A computation network in which this layer is to be bound.\n reset : bool, optional\n If ``True`` (the default), reset the resolved layers for this layer.\n initialize : bool, optional\n If ``True`` (the default), initialize the parameters for this layer\n by calling :func:`setup`.\n\n Raises\n ------\n theanets.util.ConfigurationError :\n If an input cannot be resolved.", "id": "f7173:c0:m11"} {"signature": "@propertydef output_name(self):", "body": "return self.full_name('')", "docstring": "Full name of the default output for this layer.", "id": "f7173:c0:m5"} {"signature": "def log(self):", "body": "inputs = ''.join(''.format(*ns) for ns in self._input_shapes.items())util.log('',self, getattr(self.activate, '', self.activate), inputs)util.log('', self.log_params())", "docstring": "Log some information about this layer.", "id": "f7173:c0:m15"} {"signature": "def connect(self, inputs):", "body": "outputs, updates = self.transform(inputs)if isinstance(outputs, dict):outputs = sorted(outputs.items())if isinstance(outputs, (TT.TensorVariable, SS.SparseVariable)):outputs = [('', outputs)]outs = {self.full_name(name): expr for name, expr in outputs}return outs, updates", "docstring": "Create Theano variables representing the outputs of this layer.\n\n Parameters\n ----------\n inputs : dict of Theano expressions\n Symbolic inputs to this layer, given as a dictionary mapping string\n names to Theano expressions. Each string key should be of the form\n \"{layer_name}:{output_name}\" and refers to a specific output from\n a specific layer in the graph.\n\n Returns\n -------\n outputs : dict\n A dictionary mapping names to Theano expressions for the outputs\n from this layer.\n updates : sequence of (parameter, expression) tuples\n Updates that should be performed by a Theano function that computes\n something using this layer.", "id": "f7173:c0:m9"} {"signature": "@propertydef input_size(self):", "body": "shape = self.input_shapeif shape is None:raise util.ConfigurationError(''.format(self.name))return shape[-]", "docstring": "Size of layer input (for layers with one input).", "id": "f7173:c0:m4"} {"signature": "def add_weights(self, name, nin, nout, mean=, std=, sparsity=, diagonal=):", "body": "glorot = / np.sqrt(nin + nout)m = self.kwargs.get(''.format(name), self.kwargs.get('', mean))s = self.kwargs.get(''.format(name), self.kwargs.get('', std or glorot))p = self.kwargs.get(''.format(name), self.kwargs.get('', sparsity))d = self.kwargs.get(''.format(name), self.kwargs.get('', diagonal))self._params.append(theano.shared(util.random_matrix(nin, nout, mean=m, std=s, sparsity=p,diagonal=d, rng=self.rng),name=self._fmt(name)))", "docstring": "Helper method to create a new weight matrix.\n\n Parameters\n ----------\n name : str\n Name of the parameter to add.\n nin : int\n Size of \"input\" for this weight matrix.\n nout : int\n Size of \"output\" for this weight matrix.\n mean : float, optional\n Mean value for randomly-initialized weights. Defaults to 0.\n std : float, optional\n Standard deviation of initial matrix values. Defaults to\n :math:`1 / sqrt(n_i + n_o)`.\n sparsity : float, optional\n Fraction of weights to be set to zero. Defaults to 0.\n diagonal : float, optional\n Initialize weights to a matrix of zeros with this value along the\n diagonal. 
Defaults to None, which initializes all weights randomly.", "id": "f7173:c0:m20"} {"signature": "def log_params(self):", "body": "total = for p in self.params:shape = p.get_value().shapeutil.log('', p.name, shape)total += np.prod(shape)return total", "docstring": "Log information about this layer's parameters.", "id": "f7173:c0:m16"} {"signature": "def add_conv_weights(self, name, mean=, std=None, sparsity=):", "body": "nin = self.input_sizenout = self.output_sizemean = self.kwargs.get(''.format(name),self.kwargs.get('', mean))std = self.kwargs.get(''.format(name),self.kwargs.get('', std or / np.sqrt(nin + nout)))sparsity = self.kwargs.get(''.format(name),self.kwargs.get('', sparsity))arr = np.zeros((nout, nin) + self.filter_size, util.FLOAT)for r in range(self.filter_size[]):for c in range(self.filter_size[]):arr[:, :, r, c] = util.random_matrix(nout, nin, mean, std, sparsity=sparsity, rng=self.rng)self._params.append(theano.shared(arr, name=self._fmt(name)))", "docstring": "Add a convolutional weight array to this layer's parameters.\n\n Parameters\n ----------\n name : str\n Name of the parameter to add.\n mean : float, optional\n Mean value for randomly-initialized weights. Defaults to 0.\n std : float, optional\n Standard deviation of initial matrix values. Defaults to\n :math:`1 / sqrt(n_i + n_o)`.\n sparsity : float, optional\n Fraction of weights to set to zero. Defaults to 0.", "id": "f7175:c0:m2"} {"signature": "def _create_rates(self, dist='', size=None, eps=):", "body": "if size is None:size = self.output_sizeif dist == '':z = np.random.uniform(eps, - eps, size=size).astype(util.FLOAT)return theano.shared(z, name=self._fmt(''))if dist == '':z = np.random.uniform(-, -eps, size=size).astype(util.FLOAT)return theano.shared(np.exp(z), name=self._fmt(''))return None", "docstring": "Create a rate parameter (usually for a recurrent network layer).\n\n Parameters\n ----------\n dist : {'uniform', 'log'}, optional\n Distribution of rate values. Defaults to ``'uniform'``.\n size : int, optional\n Number of rates to create. 
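add_weights above (f7173:c0:m20) defaults the weight standard deviation to the Glorot-style scale 1/sqrt(nin + nout) and can optionally sparsify the matrix. A small numpy sketch of just that random-matrix step under those defaults; the diagonal-initialization branch is omitted:

import numpy as np

def random_weights(nin, nout, mean=0.0, std=None, sparsity=0.0, rng=None):
    rng = rng or np.random.RandomState(13)
    std = std if std is not None else 1.0 / np.sqrt(nin + nout)   # Glorot-style scale
    w = mean + std * rng.randn(nin, nout)
    if sparsity > 0:
        # Zero out a random fraction of the entries.
        w[rng.rand(nin, nout) < sparsity] = 0.0
    return w

w = random_weights(100, 50)
print(w.shape, round(float(w.std()), 3))   # (100, 50) and roughly 1/sqrt(150), about 0.082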
Defaults to ``self.output_size``.\n eps : float, optional\n A \"buffer\" preventing rate values from getting too close to 0 or 1.\n Defaults to 1e-4.\n\n Returns\n -------\n rates : theano shared or None\n A vector of rate parameters for certain types of recurrent layers.", "id": "f7176:c0:m4"} {"signature": "def find(self, which, param):", "body": "for i, layer in enumerate(self.layers):if which == i or which == layer.name:return layer.find(param)raise KeyError(which)", "docstring": "Get a parameter from a layer in the network.\n\n Parameters\n ----------\n which : int or str\n The layer that owns the parameter to return.\n\n If this is an integer, then 0 refers to the input layer, 1 refers\n to the first hidden layer, 2 to the second, and so on.\n\n If this is a string, the layer with the corresponding name, if any,\n will be used.\n\n param : int or str\n Name of the parameter to retrieve from the specified layer, or its\n index in the parameter list of the layer.\n\n Raises\n ------\n KeyError\n If there is no such layer, or if there is no such parameter in the\n specified layer.\n\n Returns\n -------\n param : Theano shared variable\n A shared parameter variable from the indicated layer.", "id": "f7180:c0:m11"} {"signature": "def updates(self, **kwargs):", "body": "regs = regularizers.from_kwargs(self, **kwargs)_, updates = self.build_graph(regs)return updates", "docstring": "Return expressions to run as updates during network training.\n\n Returns\n -------\n updates : list of (parameter, expression) pairs\n A list of named parameter update expressions for this network.", "id": "f7180:c0:m21"} {"signature": "def build_graph(self, regularizers=()):", "body": "key = self._hash(regularizers)if key not in self._graphs:util.log('')for loss in self.losses:loss.log()for reg in regularizers:reg.log()outputs = {}updates = []for layer in self.layers:out, upd = layer.connect(outputs)for reg in regularizers:reg.modify_graph(out)outputs.update(out)updates.extend(upd)self._graphs[key] = outputs, updatesreturn self._graphs[key]", "docstring": "Connect the layers in this network to form a computation graph.\n\n Parameters\n ----------\n regularizers : list of :class:`theanets.regularizers.Regularizer`\n A list of the regularizers to apply while building the computation\n graph.\n\n Returns\n -------\n outputs : list of Theano variables\n A list of expressions giving the output of each layer in the graph.\n updates : list of update tuples\n A list of updates that should be performed by a Theano function that\n computes something using this graph.", "id": "f7180:c0:m7"} {"signature": "def itertrain(self, train, valid=None, algo='', subalgo='',save_every=, save_progress=None, **kwargs):", "body": "if '' not in kwargs:kwargs[''] = self._rngdef create_dataset(data, **kwargs):name = kwargs.get('', '')s = ''.format(name)return downhill.Dataset(data,name=name,batch_size=kwargs.get('', ),iteration_size=kwargs.get('', kwargs.get(s)),axis=kwargs.get('', ),rng=kwargs[''])if valid is None:valid = trainif not isinstance(valid, downhill.Dataset):valid = create_dataset(valid, name='', **kwargs)if not isinstance(train, downhill.Dataset):train = create_dataset(train, name='', **kwargs)if '' in kwargs:warnings.warn('',DeprecationWarning)algo = kwargs.pop('')if isinstance(algo, (list, tuple)):algo = algo[]if isinstance(algo, util.basestring):algo = algo.lower()if algo == '':algo = trainer.SampleTrainer(self)elif algo.startswith('') or algo.startswith(''):algo = trainer.SupervisedPretrainer(subalgo, self)elif 
algo.startswith('') or algo.startswith(''):algo = trainer.UnsupervisedPretrainer(subalgo, self)else:algo = trainer.DownhillTrainer(algo, self)def needs_saving(elapsed, iteration):if save_progress is None:return Falseif isinstance(save_every, float):return elapsed > * save_everyif isinstance(save_every, int):return iteration % save_every == return Falsestart = time.time()for i, monitors in enumerate(algo.itertrain(train, valid, **kwargs)):yield monitorsnow = time.time()if i and needs_saving(now - start, i):filename_or_handle = save_progressif isinstance(filename_or_handle, util.basestring):filename_or_handle = save_progress.format(int(now))self.save(filename_or_handle)start = now", "docstring": "Train our network, one batch at a time.\n\n This method yields a series of ``(train, valid)`` monitor pairs. The\n ``train`` value is a dictionary mapping names to monitor values\n evaluated on the training dataset. The ``valid`` value is also a\n dictionary mapping names to values, but these values are evaluated on\n the validation dataset.\n\n Because validation might not occur every training iteration, the\n validation monitors might be repeated for multiple training iterations.\n It is probably most helpful to think of the validation monitors as being\n the \"most recent\" values that have been computed.\n\n After training completes, the network attribute of this class will\n contain the trained network parameters.\n\n Parameters\n ----------\n train : :class:`Dataset ` or list\n A dataset to use when training the network. If this is a\n ``downhill.Dataset`` instance, it will be used directly as the\n training datset. If it is a list of numpy arrays or a list of\n callables, it will be converted to a ``downhill.Dataset`` and then\n used as the training set.\n valid : :class:`Dataset ` or list, optional\n If this is provided, it will be used as a validation dataset. If not\n provided, the training set will be used for validation. (This is not\n recommended!)\n algo : str, optional\n An optimization algorithm to use for training our network. If not\n provided, :class:`RMSProp ` will be used.\n subalgo : str, optional\n An optimization algorithm to use for a trainer that requires a\n \"sub-algorithm,\" sugh as an unsupervised pretrainer. Defaults to\n :class:`RMSProp `.\n save_every : int or float, optional\n If this is nonzero and ``save_progress`` is not None, then the model\n being trained will be saved periodically. If this is a float, it is\n treated as a number of minutes to wait between savings. If it is an\n int, it is treated as the number of training epochs to wait between\n savings. Defaults to 0.\n save_progress : str or file handle, optional\n If this is not None, and ``save_progress`` is nonzero, then save the\n model periodically during training. This parameter gives either (a)\n the full path of a file to save the model, or (b) a file-like object\n where the model should be saved. If it is a string and the given\n name contains a \"{}\" format specifier, it will be filled with the\n integer Unix timestamp at the time the model is saved. Defaults to\n None, which does not save models.\n\n Yields\n ------\n training : dict\n A dictionary of monitor values computed using the training dataset,\n at the conclusion of training. This dictionary will at least contain\n a 'loss' key that indicates the value of the loss function. 
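The needs_saving closure in Network.itertrain above treats a float save_every as minutes of wall-clock time and an int as a number of epochs, as the surrounding docstring spells out. The numeric literals are stripped from the corpus entry, so the 60 and 0 below are a reading of that docstring rather than verbatim code:

def needs_saving(elapsed_seconds, iteration, save_every, save_progress):
    if save_progress is None:          # nowhere to save: never trigger
        return False
    if isinstance(save_every, float):  # float: minutes between saves
        return elapsed_seconds > 60 * save_every
    if isinstance(save_every, int):    # int: epochs between saves
        return save_every > 0 and iteration % save_every == 0
    return False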
Other\n keys may be available depending on the trainer being used.\n validation : dict\n A dictionary of monitor values computed using the validation\n dataset, at the conclusion of training.", "id": "f7180:c0:m4"} {"signature": "def score(self, x, y, w=None, **kwargs):", "body": "u = y - self.predict(x, **kwargs)v = y - y.mean()if w is None:w = np.ones_like(u)return - (w * u * u).sum() / (w * v * v).sum()", "docstring": "Compute R^2 coefficient of determination for a given labeled input.\n\n Parameters\n ----------\n x : ndarray (num-examples, num-inputs)\n An array containing data to be fed into the network. Multiple\n examples are arranged as rows in this array, with columns containing\n the variables for each example.\n y : ndarray (num-examples, num-outputs)\n An array containing expected target data for the network. Multiple\n examples are arranged as rows in this array, with columns containing\n the variables for each example.\n\n Returns\n -------\n r2 : float\n The R^2 correlation between the prediction of this netork and its\n target output.", "id": "f7180:c0:m14"} {"signature": "def _hash(self, regularizers=()):", "body": "def add(s):h.update(str(s).encode(''))h = hashlib.md5()for l in self.layers:add(''.format(l.__class__.__name__, l.name, l.output_shape))for l in self.losses:add(''.format(l.__class__.__name__, l.weight))for r in regularizers:add(''.format(r.__class__.__name__, r.weight, r.pattern))return h.hexdigest()", "docstring": "Construct a string key for representing a computation graph.\n\n This key will be unique for a given (a) network topology, (b) set of\n losses, and (c) set of regularizers.\n\n Returns\n -------\n key : str\n A hash representing the computation graph for the current network.", "id": "f7180:c0:m6"} {"signature": "def train(self, *args, **kwargs):", "body": "monitors = Nonefor monitors in self.itertrain(*args, **kwargs):passreturn monitors", "docstring": "Train the network until the trainer converges.\n\n All arguments are passed to :func:`itertrain`.\n\n Returns\n -------\n training : dict\n A dictionary of monitor values computed using the training dataset,\n at the conclusion of training. This dictionary will at least contain\n a 'loss' key that indicates the value of the loss function. Other\n keys may be available depending on the trainer being used.\n validation : dict\n A dictionary of monitor values computed using the validation\n dataset, at the conclusion of training.", "id": "f7180:c0:m5"} {"signature": "@classmethoddef load(cls, filename_or_handle):", "body": "assert not isinstance(cls, Network),''if isinstance(filename_or_handle, util.basestring):opener = gzip.open if filename_or_handle.lower().endswith('') else openhandle = opener(filename_or_handle, '')else:handle = filename_or_handlemodel = pickle.load(handle)if isinstance(filename_or_handle, util.basestring):handle.close()util.log('', filename_or_handle)return model", "docstring": "Load a saved network from disk.\n\n Parameters\n ----------\n filename_or_handle : str or file handle\n Load the state of this network from a pickle file. If this parameter\n is a string, it names the file where the pickle will be saved. If it\n is a file-like object, this object will be used for reading the\n pickle. 
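score above (f7180:c0:m14) computes a weighted coefficient of determination, R^2 = 1 - sum(w*u^2) / sum(w*v^2), where u is the residual against the prediction and v the residual against the target mean. A quick numpy check of the two boundary cases (a perfect prediction gives 1.0, predicting the mean gives 0.0):

import numpy as np

def r2_score(y_true, y_pred, w=None):
    u = y_true - y_pred                 # model residuals
    v = y_true - y_true.mean()          # residuals of the constant-mean predictor
    if w is None:
        w = np.ones_like(u)
    return 1.0 - (w * u * u).sum() / (w * v * v).sum()

y = np.array([1.0, 2.0, 3.0, 4.0])
print(r2_score(y, y))                              # 1.0
print(r2_score(y, np.full_like(y, y.mean())))      # 0.0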
If the filename ends in \".gz\" then the output will\n automatically be gunzipped.", "id": "f7180:c0:m18"} {"signature": "def monitors(self, **kwargs):", "body": "regs = regularizers.from_kwargs(self, **kwargs)outputs, _ = self.build_graph(regs)monitors = [('', self.losses[](outputs))]def matching(pattern):''''''for name, expr in util.outputs_matching(outputs, pattern):yield name, exprfor name, expr in util.params_matching(self.layers, pattern):yield name, exprdef parse_levels(levels):''''''if isinstance(levels, dict):levels = levels.items()if isinstance(levels, (int, float)):levels = [levels]for level in levels:if isinstance(level, (tuple, list)):label, call = levelyield ''.format(label), callif isinstance(level, (int, float)):def call(expr):return (expr < level).mean()yield ''.format(level), callinputs = kwargs.get('', {})if isinstance(inputs, dict):inputs = inputs.items()for pattern, levels in inputs:for name, expr in matching(pattern):for key, value in parse_levels(levels):monitors.append((''.format(name, key), value(expr)))return monitors", "docstring": "Return expressions that should be computed to monitor training.\n\n Returns\n -------\n monitors : list of (name, expression) pairs\n A list of named monitor expressions to compute for this network.", "id": "f7180:c0:m20"} {"signature": "def __call__(self, outputs):", "body": "output = outputs[self.output_name]k = output.shape[-]n = TT.prod(output.shape) // kprob = output.reshape((n, k))[TT.arange(n), self._target.reshape((n, ))]nlp = -TT.log(TT.clip(prob, , ))if self._weights is not None:return (self._weights.reshape((n, )) * nlp).sum() / self._weights.sum()return nlp.mean()", "docstring": "Construct the computation graph for this loss function.\n\n Parameters\n ----------\n outputs : dict of Theano expressions\n A dictionary mapping network output names to Theano expressions\n representing the outputs of a computation graph.\n\n Returns\n -------\n loss : Theano expression\n The values of the loss given the network output.", "id": "f7181:c6:m1"} {"signature": "def decode(self, enc):", "body": "return ''.join(self._rev_index[c] for c in enc)", "docstring": "Encode a text string by replacing characters with alphabet index.\n\n Parameters\n ----------\n classes : list of int\n A sequence of alphabet index values to convert to text.\n\n Returns\n -------\n txt : str\n A string containing corresponding characters from the alphabet.", "id": "f7182:c0:m2"} {"signature": "def encode(self, txt):", "body": "return list(self._fwd_index.get(c, ) for c in txt)", "docstring": "Encode a text string by replacing characters with alphabet index.\n\n Parameters\n ----------\n txt : str\n A string to encode.\n\n Returns\n -------\n classes : list of int\n A sequence of alphabet index values corresponding to the given text.", "id": "f7182:c0:m1"} {"signature": "def plot_layers(weights, tied_weights=False, channels=):", "body": "if hasattr(weights[], ''):weights = [w.get_value() for w in weights]k = min(len(weights), )imgs = np.eye(weights[].shape[])for i, weight in enumerate(weights[:-]):imgs = np.dot(weight.T, imgs)plot_images(imgs, + * k + i + ,channels=channels,title=''.format(i+))weight = weights[-]n = weight.shape[] / channelsif int(np.sqrt(n)) ** != n:returnif tied_weights:imgs = np.dot(weight.T, imgs)plot_images(imgs, + * k + k,channels=channels,title=''.format(k))else:plot_images(weight, + * k + k,channels=channels,title='')", "docstring": "Create a plot of weights, visualized as \"bottom-level\" pixel arrays.", "id": "f7208:m4"} {"signature": 
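The load classmethod whose docstring ends above (f7180:c0:m18) accepts either a filename or an open handle and gunzips transparently when the name ends in ".gz". The same pattern for saving and loading arbitrary objects, as a hedged standalone sketch:

import gzip
import pickle

def _opener_for(filename):
    # gzip.open for ".gz" names, plain open otherwise.
    return gzip.open if filename.lower().endswith(".gz") else open

def save(obj, filename_or_handle):
    if isinstance(filename_or_handle, str):
        with _opener_for(filename_or_handle)(filename_or_handle, "wb") as handle:
            pickle.dump(obj, handle)
    else:
        pickle.dump(obj, filename_or_handle)

def load(filename_or_handle):
    if isinstance(filename_or_handle, str):
        with _opener_for(filename_or_handle)(filename_or_handle, "rb") as handle:
            return pickle.load(handle)
    return pickle.load(filename_or_handle)

save({"weights": [1, 2, 3]}, "model.pkl.gz")
print(load("model.pkl.gz"))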
"def readline(self, use_raw=None, prompt=''):", "body": "if self.closed: raise ValueErrorif == len(self.input):self.closed = Trueraise EOFErrorline = self.input[]del self.input[]return line", "docstring": "Read a line of input. EOFError will be raised on EOF.\n\n Note that we don't support prompting", "id": "f7220:c0:m3"} {"signature": "def open(self, inp, opts=None):", "body": "if isinstance(inp, list):self.input = inpelse:raise IOError(\"\" % (type(inp), inp))return", "docstring": "Use this to set where to read from.", "id": "f7220:c0:m2"} {"signature": "def open(self, output):", "body": "if isinstance(output, types.Listype):self.output = outputelse:raise IOError(\"\" % (type(output),output))return", "docstring": "Use this to set where to write to. output can be a\n file object or a string. This code raises IOError on error.\n\n If another file was previously open upon calling this open,\n that will be stacked and will come back into use after\n a close_write().", "id": "f7220:c1:m3"} {"signature": "def id_name_checker(self):", "body": "name2id = Mthread.map_thread_names()for thread_id, f in list(sys._current_frames().items()):self.assertEqual(thread_id,name2id[Mthread.id2thread_name(thread_id)])self.assertNotEqual(f, Mthread.find_debugged_frame(f))pass", "docstring": "Helper for testing map_thread_names and id2thread", "id": "f7245:c1:m0"} {"signature": "def writeline(self, msg):", "body": "self.write(msg)self.output.append('')return", "docstring": "used to write to a debugger that is connected to this\n server; Here, we use the null string '' as an indicator of a\n newline.", "id": "f7286:c1:m5"} {"signature": "def close(self):", "body": "self.closed = Truereturn", "docstring": "Interface is for compatibility", "id": "f7286:c0:m1"} {"signature": "def open(self, inp, opts=None):", "body": "if isinstance(inp, list):self.input = inpelse:raise IOError(\"\" % (type(inp), inp))return", "docstring": "Use this to set where to read from.", "id": "f7286:c0:m2"} {"signature": "def readline(self, use_raw=None, prompt=''):", "body": "if self.closed: raise ValueErrorif == len(self.input):self.closed = Trueraise EOFErrorline = self.input[]del self.input[]return line", "docstring": "Read a line of input. EOFError will be raised on EOF.\n\n Note that we don't support prompting", "id": "f7286:c0:m3"} {"signature": "def close(self):", "body": "self.closed = Truereturn", "docstring": "Nothing to do here. Interface is for compatibility", "id": "f7286:c1:m1"} {"signature": "def readline(self, prompt='', use_raw=None):", "body": "line = self.input.readline()if not line: raise EOFErrorreturn line.rstrip(\"\")", "docstring": "Read a line of input. Prompt and use_raw exist to be\n compatible with other input routines and are ignored.\n EOFError will be raised on EOF.", "id": "f7288:c0:m3"} {"signature": "def run(self, cmd, start_opts=None, globals_=None, locals_=None):", "body": "if globals_ is None:globals_ = globals()if locals_ is None:locals_ = globals_if not isinstance(cmd, types.CodeType):self.eval_string = cmdcmd = cmd+''passretval = Noneself.core.start(start_opts)try:retval = eval(cmd, globals_, locals_)except SyntaxError:try:exec(cmd, globals_, locals_)except DebuggerQuit:passexcept DebuggerQuit:passpassexcept DebuggerQuit:passfinally:self.core.stop()return retval", "docstring": "Run debugger on string `cmd' using builtin function eval\n and if that builtin exec. Arguments `globals_' and `locals_'\n are the dictionaries to use for local and global variables. 
By\n default, the value of globals is globals(), the current global\n variables. If `locals_' is not given, it becomes a copy of\n `globals_'.\n\n Debugger.core.start settings are passed via optional\n dictionary `start_opts'. Overall debugger settings are in\n Debugger.settings which changed after an instance is created\n . Also see `run_eval' if what you want to run is an\n run_eval'able expression have that result returned and\n `run_call' if you want to debug function run_call.", "id": "f7290:c0:m0"} {"signature": "def run_exec(self, cmd, start_opts=None, globals_=None, locals_=None):", "body": "if globals_ is None:globals_ = globals()if locals_ is None:locals_ = globals_if not isinstance(cmd, types.CodeType):cmd = cmd+''passself.core.start(start_opts)try:exec(cmd, globals_, locals_)except DebuggerQuit:passfinally:self.core.stop()return", "docstring": "Run debugger on string `cmd' which will executed via the\n builtin function exec. Arguments `globals_' and `locals_' are\n the dictionaries to use for local and global variables. By\n default, the value of globals is globals(), the current global\n variables. If `locals_' is not given, it becomes a copy of\n `globals_'.\n\n Debugger.core.start settings are passed via optional\n dictionary `start_opts'. Overall debugger settings are in\n Debugger.settings which changed after an instance is created\n . Also see `run_eval' if what you want to run is an\n run_eval'able expression have that result returned and\n `run_call' if you want to debug function run_call.", "id": "f7290:c0:m1"} {"signature": "def parse_list_cmd(proc, args, listsize=):", "body": "text = proc.current_command[len(args[])+:].strip()if text in frozenset(('', '', '', '')):if text == '':location = resolve_location(proc, '')return location.path, location.line_number, listsizeelse:if proc.list_lineno is None:proc.errmsg(\"\")return INVALID_PARSE_LISTfilename = proc.list_filenameif text == '':first = max(, proc.list_lineno + listsize)elif text == '':if proc.list_lineno == + listsize:proc.errmsg(\"\" % proc.list_filename)return INVALID_PARSE_LISTfirst = max(, proc.list_lineno - (*listsize) - )elif text == '':first = proc.list_lineno + last = first + listsize - return filename, first, lastelse:try:list_range = build_range(text)except LocationError as e:proc.errmsg(\"\")proc.errmsg(e.text)proc.errmsg(e.text_cursor)return INVALID_PARSE_LISTexcept ScannerError as e:proc.errmsg(\"\")proc.errmsg(e.text)proc.errmsg(e.text_cursor)return INVALID_PARSE_LISTif list_range.first is None:assert isinstance(list_range.last, Location)location = resolve_location(proc, list_range.last)if not location:return INVALID_PARSE_LISTlast = location.line_numberfirst = max(, last - listsize)return location.path, first, lastelif isinstance(list_range.first, int):first = list_range.firstlocation = resolve_location(proc, list_range.last)if not location:return INVALID_PARSE_LISTfilename = location.pathlast = location.line_numberif last < first:last = first + lastreturn location.path, first, lastelse:assert isinstance(list_range.first, Location)location = resolve_location(proc, list_range.first)if not location:return INVALID_PARSE_LISTfirst = location.line_numberlast = list_range.lastif location.method:first -= listsize // if isinstance(last, str):assert last[] == ''last = first + int(last[:])elif not last:last = first + listsizeelif last < first:last = first + lastreturn location.path, first, lastpassreturn", "docstring": "Parses arguments for the \"list\" command and returns the tuple:\n (filename, first line 
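Debugger.run above (f7290:c0:m0) first tries eval, so that an expression's value can be returned to the caller, and falls back to exec when the string is a statement and eval raises SyntaxError. The fallback on its own, without the debugger start/stop plumbing:

def eval_or_exec(cmd, globals_=None, locals_=None):
    if globals_ is None:
        globals_ = globals()
    if locals_ is None:
        locals_ = globals_
    try:
        return eval(cmd, globals_, locals_)    # expressions produce a value
    except SyntaxError:
        exec(cmd, globals_, locals_)           # statements do not
        return None

print(eval_or_exec("1 + 2"))    # 3
eval_or_exec("x = 5")           # a statement: returns None, binds x in globals_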
number, last line number)\n or sets these to None if there was some problem.", "id": "f7294:m0"} {"signature": "def help(self, args):", "body": "if len(args) <= :doc = self.__doc__ or self.run.__doc__if doc:self.rst_msg(doc.rstrip(''))else:self.proc.intf[-].errmsg('' +'' +self.name)passreturnsubcmd_name = args[]if '' == subcmd_name:self.section(\"\" % self.name)self.msg(self.columnize_commands(self.cmds.list()))returncmd = self.cmds.lookup(subcmd_name)if cmd:doc = cmd.__doc__ or cmd.run.__doc__if doc:self.proc.rst_msg(doc.rstrip(''))else:self.proc.intf[-].errmsg('''' %(subcmd_name, self.name))passelse:cmds = [c for c in self.cmds.list()if re.match('' + subcmd_name, c) ]if cmds == []:self.errmsg(\"\"\"\" % (self.name, subcmd_name))else:self.section(\"\" %(self.name, subcmd_name,))self.msg_nocr(self.columnize_commands(cmds))passpassreturn", "docstring": "Give help for a command which has subcommands. This can be\n called in several ways:\n help cmd\n help cmd subcmd\n help cmd commands\n\n Our shtick is to give help for the overall command only if\n subcommand or 'commands' is not given. If a subcommand is given and\n found, then specific help for that is given. If 'commands' is given\n we will list the all the subcommands.", "id": "f7308:c0:m2"} {"signature": "def _load_debugger_subcommands(self, name):", "body": "cmd_instances = []class_prefix = capitalize(name) module_dir = '' % namemod = __import__(module_dir, None, None, [''])eval_cmd_template = ''for module_name in mod.__modules__:import_name = module_dir + '' + module_nametry:command_mod = importlib.import_module(import_name)except ImportError:print((\"\" %(import_name, module_name, sys.exc_info()[])))continueclassnames = [ classname for classname, classvalue ininspect.getmembers(command_mod, inspect.isclass)if ('' != classname andclassname.startswith(class_prefix)) ]for classname in classnames:eval_cmd = eval_cmd_template % classnametry:instance = eval(eval_cmd)self.cmds.add(instance)except:print(\"\" % classname)passpasspassreturn cmd_instances", "docstring": "Create an instance of each of the debugger\n subcommands. Commands are found by importing files in the\n directory 'name' + 'sub'. Some files are excluded via an array set\n in __init__. For each of the remaining files, we import them\n and scan for class names inside those files and for each class\n name, we will create an instance of that class. 
The set of\n DebuggerCommand class instances form set of possible debugger\n commands.", "id": "f7308:c0:m1"} {"signature": "def msg(self, msg, opts={}):", "body": "try:return(self.debugger.intf[-].msg(msg))except EOFError:passreturn None", "docstring": "Convenience short-hand for self.debugger.intf[-1].msg", "id": "f7314:c0:m4"} {"signature": "def msg_nocr(self, msg, opts={}):", "body": "try:return(self.debugger.intf[-].msg_nocr(msg))except EOFError:passreturn None", "docstring": "Convenience short-hand for self.debugger.intf[-1].msg_nocr", "id": "f7314:c0:m5"} {"signature": "def rst_msg(self, text, opts={}):", "body": "text = Mformat.rst_text(text,'' == self.debugger.settings[''],self.debugger.settings[''])return self.msg(text)", "docstring": "Convert ReStructuredText and run through msg()", "id": "f7314:c0:m6"} {"signature": "def run(self, args):", "body": "mainfile = self.core.filename(None)if self.core.is_running():curframe = self.proc.curframeif curframe:line_no = inspect.getlineno(curframe)offset = curframe.f_lastiself.msg(\"\" % offset)offset = max(offset, )code = curframe.f_codeco_code = code.co_codedisassemble_bytes(self.msg, self.msg_nocr,co_code, offset, line_no, line_no-, line_no+,constants=code.co_consts, cells=code.co_cellvars,varnames=code.co_varnames, freevars=code.co_freevars,linestarts=dict(findlinestarts(code)),end_offset=offset+)passpasselse:if mainfile:part1 = \"\" % mainfilemsg = \"\"self.msg(Mmisc.wrapped_lines(part1, msg,self.settings['']))else:self.msg('')passself.msg(self.core.execution_status)passreturn False", "docstring": "Program counter.", "id": "f7327:c0:m0"} {"signature": "def show_category(self, category, args):", "body": "n2cmd = self.proc.commandsnames = list(n2cmd.keys())if len(args) == and args[] == '':self.section(\"\" % category)cmds = [cmd for cmd in names if category == n2cmd[cmd].category]cmds.sort()self.msg_nocr(self.columnize_commands(cmds))returnself.msg(\"\" % categories[category])self.section(\"\")names.sort()for name in names: if category != n2cmd[name].category: continueself.msg(\"\" % (name, n2cmd[name].short_help,))passreturn", "docstring": "Show short help for all commands in `category'.", "id": "f7357:c0:m3"} {"signature": "def threaded_quit(self, arg):", "body": "threading_list = threading.enumerate()mythread = threading.currentThread()for t in threading_list:if t != mythread:ctype_async_raise(t, Mexcept.DebuggerQuit)passpassraise Mexcept.DebuggerQuit", "docstring": "quit command when several threads are involved.", "id": "f7359:c0:m1"} {"signature": "def find_and_set_debugged_frame(self, frame, thread_id):", "body": "thread = threading._active[thread_id]thread_name = thread.getName()if (not self.settings[''] andthread_name == Mthread.current_thread_name()):newframe = Mthread.find_debugged_frame(frame)if newframe is not None: frame = newframepassself.stack, self.curindex = Mcmdproc.get_stack(frame, None,self.proc)self.proc.stack, self.proc.curindex = self.stack, self.curindexself.proc.frame_thread_name = thread_namereturn", "docstring": "The dance we have to do to set debugger frame state to\n *frame*, which is in the thread with id *thread_id*. 
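_load_debugger_subcommands (f7308:c0:m1, above) imports every module under a subcommand package, scans each one with inspect.getmembers for classes whose names share a prefix, and instantiates them. A trimmed-down sketch of that discovery loop; the package, prefix, and constructor arguments are hypothetical parameters here:

import importlib
import inspect
import pkgutil

def load_subcommands(package, class_prefix, *ctor_args):
    instances = []
    for info in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(f"{package.__name__}.{info.name}")
        for name, value in inspect.getmembers(module, inspect.isclass):
            # Only instantiate classes that follow the naming convention.
            if name.startswith(class_prefix):
                instances.append(value(*ctor_args))
    return instances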
We may\n need to the hide initial debugger frames.", "id": "f7360:c0:m1"} {"signature": "def confirm(self, msg, default=False):", "body": "return(self.debugger.intf[-].confirm(msg, default))", "docstring": "Convenience short-hand for self.debugger.intf.confirm", "id": "f7361:c0:m2"} {"signature": "def rst_msg(self, text):", "body": "return(self.proc.rst_msg(text))", "docstring": "Convenience short-hand for self.proc.rst_msg(text)", "id": "f7361:c0:m6"} {"signature": "def interact(banner=None, readfunc=None, my_locals=None, my_globals=None):", "body": "console = code.InteractiveConsole(my_locals, filename='')console.runcode = lambda code_obj: runcode(console, code_obj)setattr(console, '', my_globals)if readfunc is not None:console.raw_input = readfuncelse:try:import readlineexcept ImportError:passconsole.interact(banner)pass", "docstring": "Almost a copy of code.interact\n Closely emulate the interactive Python interpreter.\n\n This is a backwards compatible interface to the InteractiveConsole\n class. When readfunc is not specified, it attempts to import the\n readline module to enable GNU readline if it is available.\n\n Arguments (all optional, all default to None):\n\n banner -- passed to InteractiveConsole.interact()\n readfunc -- if not None, replaces InteractiveConsole.raw_input()\n local -- passed to InteractiveInterpreter.__init__()", "id": "f7367:m0"} {"signature": "def dbgr(self, string):", "body": "print('')self.proc.cmd_queue.append(string)self.proc.process_command()return", "docstring": "Invoke a debugger command from inside a python shell called inside\n the debugger.", "id": "f7370:c0:m0"} {"signature": "def cache_from_source(path, debug_override=None):", "body": "debug = not sys.flags.optimize if debug_override is None else debug_overrideif debug:suffixes = DEBUG_BYTECODE_SUFFIXESelse:suffixes = OPTIMIZED_BYTECODE_SUFFIXESpasshead, tail = os.path.split(path)base_filename, sep, _ = tail.partition('')if not hasattr(sys, ''):raise NotImplementedError('')tag = sys.implementation.cache_tagif tag is None:raise NotImplementedError('')filename = ''.join([base_filename, sep, tag, suffixes[]])return os.path.join(head, _PYCACHE, filename)", "docstring": "Given the path to a .py file, return the path to its .pyc/.pyo file.\n\n The .py file does not need to exist; this simply returns the path to the\n .pyc/.pyo file calculated as if the .py file were imported. The extension\n will be .pyc unless sys.flags.optimize is non-zero, then it will be .pyo.\n\n If debug_override is not None, then it must be a boolean and is used in\n place of sys.flags.optimize.\n\n If sys.implementation.cache_tag is None then NotImplementedError is raised.", "id": "f7377:m0"} {"signature": "def interact(banner=None, readfunc=None, my_locals=None, my_globals=None):", "body": "def console_runcode(code_obj):runcode(console, code_obj)console = code.InteractiveConsole(my_locals, filename='')console.runcode = console_runcodesetattr(console, '', my_globals)if readfunc is not None:console.raw_input = readfuncelse:try:import readlineexcept ImportError:passconsole.interact(banner)pass", "docstring": "Almost a copy of code.interact\n Closely emulate the interactive Python interpreter.\n\n This is a backwards compatible interface to the InteractiveConsole\n class. 
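cache_from_source above (f7377:m0) re-derives the PEP 3147 bytecode path: a __pycache__ sibling whose filename carries sys.implementation.cache_tag. The standard library exposes the same computation, which is useful for sanity-checking the expected output:

import importlib.util
import sys

print(sys.implementation.cache_tag)                    # e.g. 'cpython-311'
cached = importlib.util.cache_from_source("pkg/mod.py")
print(cached)                                          # e.g. 'pkg/__pycache__/mod.cpython-311.pyc'
print(importlib.util.source_from_cache(cached))        # back to 'pkg/mod.py'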
When readfunc is not specified, it attempts to import the\n readline module to enable GNU readline if it is available.\n\n Arguments (all optional, all default to None):\n\n banner -- passed to InteractiveConsole.interact()\n readfunc -- if not None, replaces InteractiveConsole.raw_input()\n local -- passed to InteractiveInterpreter.__init__()", "id": "f7399:m0"} {"signature": "def event_processor(self, frame, event, arg):", "body": "out = self.debugger.intf[-].outputlineno = frame.f_linenofilename = self.core.canonic_filename(frame)filename = self.core.filename(filename)if not out:print(\"\" % (event, filename, lineno))else:out.write(\"\" % (event, filename, lineno))if arg is not None:out.writeline('' % repr(arg))else:out.writeline('')passpassreturn self.event_processor", "docstring": "A simple event processor that prints out events.", "id": "f7405:c0:m1"} {"signature": "def complete_token_filtered(aliases, prefix, expanded):", "body": "complete_ary = list(aliases.keys())results = [cmd for cmd incomplete_ary if cmd.startswith(prefix)] and not (cmd in aliases and expanded not in aliases[cmd])return sorted(results, key=lambda pair: pair[])", "docstring": "Find all starting matches in dictionary *aliases* that start\n with *prefix*, but filter out any matches already in\n *expanded*.", "id": "f7406:m0"} {"signature": "def complete_identifier(cmd, prefix):", "body": "if not cmd.proc.curframe: return [None]ns = cmd.proc.curframe.f_globals.copy()ns.update(cmd.proc.curframe.f_locals)if '' in prefix:dotted = prefix.split('')try:obj = ns[dotted[]]for part in dotted[:-]:obj = getattr(obj, part)except (KeyError, AttributeError):return []pre_prefix = ''.join(dotted[:-]) + ''return [pre_prefix + n for n in dir(obj) ifn.startswith(dotted[-])]else:return Mcomplete.complete_token(ns.keys(), prefix)", "docstring": "Complete an arbitrary expression.", "id": "f7406:m5"} {"signature": "def lookup(self, subcmd_prefix):", "body": "for subcmd_name in list(self.subcmds.keys()):if subcmd_name.startswith(subcmd_prefix)and len(subcmd_prefix) >=self.subcmds[subcmd_name].__class__.min_abbrev:return self.subcmds[subcmd_name]passreturn None", "docstring": "Find subcmd in self.subcmds", "id": "f7407:c0:m1"} {"signature": "def add(self, subcmd_cb):", "body": "subcmd_name = subcmd_cb.nameself.subcmds[subcmd_name] = subcmd_cbself.cmdlist.append(subcmd_name)", "docstring": "Add subcmd to the available subcommands for this object.\n It will have the supplied docstring, and subcmd_cb will be called\n when we want to run the command. min_len is the minimum length\n allowed to abbreviate the command. in_list indicates with the\n show command will be run when giving a list of all sub commands\n of this object. Some commands have long output like \"show commands\"\n so we might not want to show that.", "id": "f7407:c0:m3"} {"signature": "def help(self, *args):", "body": "print(args)subcmd_prefix = args[]if not subcmd_prefix or len(subcmd_prefix) == :self.msg(self.doc)self.msg(", "docstring": "help for subcommands.", "id": "f7407:c0:m5"} {"signature": "def get_int_noerr(self, arg):", "body": "if self.curframe:g = self.curframe.f_globalsl = self.curframe.f_localselse:g = globals()l = locals()passtry:val = int(eval(arg, g, l))except (SyntaxError, NameError, ValueError, TypeError):return Nonereturn val", "docstring": "Eval arg and it is an integer return the value. 
Otherwise\n return None", "id": "f7408:c0:m10"} {"signature": "def print_source_location_info(print_fn, filename, lineno, fn_name=None,f_lasti=None, remapped_file=None):", "body": "if remapped_file:mess = '' % (remapped_file, lineno, filename)else:mess = '' % (filename, lineno)if f_lasti and f_lasti != -:mess += '' % f_lastipassmess += ''if fn_name and fn_name != '':mess += \"\" % fn_namepassprint_fn(mess)return", "docstring": "Print out a source location , e.g. the first line in\n line in:\n (/tmp.py:2 @21): \n L -- 2 import sys,os\n (trepan3k)", "id": "f7408:m6"} {"signature": "def get_int(self, arg, min_value=, default=, cmdname=None,at_most=None):", "body": "if arg is None: return defaultdefault = self.get_int_noerr(arg)if default is None:if cmdname:self.errmsg((\"\"+ \"\") % (cmdname, str(arg)))else:self.errmsg(''% str(arg))passreturn Nonepassif default < min_value:if cmdname:self.errmsg((\"\" +'')% (cmdname, min_value, default))else:self.errmsg((\"\" +'')% (min_value, default))passreturn Noneelif at_most and default > at_most:if cmdname:self.errmsg((\"\" +'')% (cmdname, at_most, default))else:self.errmsg((\"\")% (at_most, default))passpassreturn default", "docstring": "If no argument use the default. If arg is a an integer between\n least min_value and at_most, use that. Otherwise report an error.\n If there's a stack frame use that in evaluation.", "id": "f7408:c0:m11"} {"signature": "def print_source_line(msg, lineno, line, event_str=None):", "body": "return msg('' % (event_str, lineno, line))", "docstring": "Print out a source line of text , e.g. the second\n line in:\n (/tmp.py:2): \n L -- 2 import sys,os\n (trepan3k)\n\n We define this method\n specifically so it can be customized for such applications\n like ipython.", "id": "f7408:m5"} {"signature": "def read_history_file(self):", "body": "histfile = self.debugger.intf[-].histfiletry:import readlinereadline.read_history_file(histfile)except IOError:passexcept ImportError:passreturn", "docstring": "Read the command history file -- possibly.", "id": "f7408:c0:m20"} {"signature": "def run_show_bool(obj, what=None):", "body": "val = show_onoff(obj.debugger.settings[obj.name])if not what: what = obj.namereturn obj.msg(\"\" % (what, val))", "docstring": "Generic subcommand showing a boolean-valued debugger setting.\n 'obj' is generally a subcommand that has 'name' and\n 'debugger.setting' attributes.", "id": "f7409:m9"} {"signature": "def show_onoff(b):", "body": "if not isinstance(b, bool):return \"\"if b:return \"\"return \"\"", "docstring": "Return 'on' for True and 'off' for False, and ?? 
for anything\n else.", "id": "f7409:m11"} {"signature": "def p_list_range(self, args):", "body": "", "docstring": "range_start ::= opt_space range\nrange ::= location\nrange ::= location opt_space COMMA opt_space NUMBER\nrange ::= location opt_space COMMA opt_space OFFSET\nrange ::= COMMA opt_space location\nrange ::= location opt_space COMMA\nrange ::= location\nrange ::= DIRECTION", "id": "f7411:c1:m5"} {"signature": "def p_asm_range(self, args):", "body": "", "docstring": "arange_start ::= opt_space arange\narange ::= range\narange ::= addr_location opt_space COMMA opt_space NUMBER\narange ::= addr_location opt_space COMMA opt_space OFFSET\narange ::= addr_location opt_space COMMA opt_space ADDRESS\narange ::= location opt_space COMMA opt_space ADDRESS\narange ::= addr_location opt_space COMMA\narange ::= addr_location\n\n# Unlike ranges, We don't allow ending at an address\n# arange ::= COMMA opt_space addr_location\n\naddr_location ::= location\naddr_location ::= ADDRESS", "id": "f7411:c1:m4"} {"signature": "def t_single_quote_file(self, s):", "body": "base = s[:-]self.add_token('', base)self.pos += len(s)", "docstring": "r\"'[^'].+", "id": "f7413:c1:m5"} {"signature": "def t_direction(self, s):", "body": "self.add_token('', s)self.pos += len(s)", "docstring": "r'^[+-]$", "id": "f7413:c1:m9"} {"signature": "def t_double_quote_file(self, s):", "body": "base = s[:-]self.add_token('', base)self.pos += len(s)", "docstring": "r'\"[^\"]+", "id": "f7413:c1:m6"} {"signature": "def t_whitespace(self, s):", "body": "self.add_token('', s)self.pos += len(s)pass", "docstring": "r'\\s+", "id": "f7413:c1:m3"} {"signature": "def t_address(self, s):", "body": "pos = self.posself.add_token('', s)self.pos = pos + len(s)", "docstring": "r'[*]\\d+", "id": "f7413:c1:m12"} {"signature": "def _postprocess_options(dbg, opts):", "body": "print_events = []if opts.fntrace: print_events = ['', '', '', '']if opts.linetrace: print_events += ['']if len(print_events):dbg.settings[''] = frozenset(print_events)passfor setting in ('', '', ''):dbg.settings[setting] = getattr(opts, setting)passif getattr(opts, ''):dbg.settings[''] = opts.highlightelse:dbg.settings[''] = ''dbg.settings[''] = Noneif not opts.private:Mdebugger.debugger_obj = dbgpassif opts.post_mortem:Mapi.debugger_on_post_mortem()passreturn", "docstring": "Handle options (`opts') that feed into the debugger (`dbg')", "id": "f7414:m3"} {"signature": "def process_options(debugger_name, pkg_version, sys_argv, option_list=None):", "body": "usage_str=\"\"\"\"\"\"optparser = OptionParser(usage=usage_str, option_list=option_list,version=\"\" % pkg_version)optparser.add_option(\"\", \"\", dest=\"\",action=\"\", default=False,help=\"\"\"\")optparser.add_option(\"\", \"\", dest=\"\",action=\"\", default=False,help=\"\"\"\")optparser.add_option(\"\", dest=\"\",action=\"\", default=False,help=\"\"\"\")optparser.add_option(\"\", dest=\"\",action='',help=\"\"\"\"\"\")optparser.add_option(\"\", \"\", dest=\"\",action=\"\", type='', metavar='',help=\"\")optparser.add_option(\"\", dest=\"\",action=\"\", type='', metavar='',help=\"\")optparser.add_option(\"\", dest=\"\",action=\"\", default=True,help=\"\")optparser.add_option(\"\", dest=\"\",action=\"\", default=False,help=\"\")optparser.add_option(\"\", dest=\"\",action=\"\", default=True,help=\"\"\"\")optparser.add_option(\"\", \"\", dest=\"\", type=\"\",help=\"\" +\"\")optparser.add_option(\"\", \"\", dest=\"\", default='',action=\"\", type='', metavar='',help=\"\"\"\")optparser.add_option(\"\", dest=\"\",action=\"\", 
type='',metavar='',default='',help=\"\"\"\")optparser.add_option(\"\", dest=\"\",action='', default=False,help=\"\")optparser.add_option(\"\", dest=\"\",action=\"\", default=True,help=\"\")optparser.add_option(\"\", dest=\"\",action=\"\", default=True,help=\"\")optparser.add_option(\"\", dest=\"\",action='', default=True,help=(\"\"\"\"))optparser.add_option(\"\", dest=\"\",action='', default=True,help=(\"\"\"\"))optparser.add_option(\"\", \"\", dest=\"\",action=\"\", default=False,help=(\"\"\"\"))optparser.add_option(\"\", \"\", dest=\"\", metavar='',action=\"\", type='',help=(\"\"\"\"))optparser.add_option(\"\", \"\", dest=\"\", default=,action=\"\", type='',help=\"\"\"\")optparser.add_option(\"\", dest=\"\",action='',help=\"\")optparser.add_option(\"\", dest=\"\",action=\"\", default=False,help=\"\")optparser.add_option(\"\", \"\", dest=\"\",help=(\"\"\"\")),optparser.add_option(\"\", dest='', action='',default=False, help=\"\")optparser.add_option(\"\", default=, type=\"\",help=\"\")optparser.disable_interspersed_args()sys.argv = list(sys_argv)(opts, sys.argv) = optparser.parse_args()dbg_opts = {'': opts.from_ipython}dbg_initfiles = []if not opts.noexecute:add_startup_file(dbg_initfiles)if opts.command:dbg_initfiles.append(opts.command)passdbg_opts[''] = {'': dbg_initfiles}if opts.cd:os.chdir(opts.cd)passif opts.output:try:dbg_opts[''] = Moutput.DebuggerUserOutput(opts.output)except IOError:_, xxx_todo_changeme, _ = sys.exc_info()(errno, strerror) = xxx_todo_changeme.argsprint(\"\" % opts.output)print(\"\" % (errno, strerror))except:print(\"\" %opts.output)print(sys.exc_info()[])sys.exit()passpassreturn opts, dbg_opts, sys.argv", "docstring": "Handle debugger options. Set `option_list' if you are writing\n another main program and want to extend the existing set of debugger\n options.\n\n The options dicionary from optparser is returned. sys_argv is\n also updated.", "id": "f7414:m2"} {"signature": "def process_options(pkg_version, sys_argv, option_list=None):", "body": "usage_str=\"\"\"\"\"\"optparser = OptionParser(usage=usage_str, option_list=option_list,version=\"\" % pkg_version)optparser.add_option(\"\", \"\", dest=\"\", default='',action=\"\", type='', metavar='',help=\"\")optparser.add_option(\"\", \"\", dest=\"\", default=,action=\"\", type='', metavar='',help=\"\"\"\")optparser.add_option(\"\", dest=\"\", default=,action=\"\", type='', metavar='',help=\"\"\"\")optparser.disable_interspersed_args()sys.argv = list(sys_argv)(opts, sys.argv) = optparser.parse_args()return opts, sys.argv", "docstring": "Handle debugger options. Set `option_list' if you are writing\n another main program and want to extend the existing set of debugger\n options.\n\n The options dicionary from opt_parser is return. 
sys_argv is\n also updated.", "id": "f7415:m0"} {"signature": "def main(dbg=None, sys_argv=list(sys.argv)):", "body": "global __title__orig_sys_argv = list(sys_argv)opts, dbg_opts, sys_argv = process_options(__title__, __version__,sys_argv)dbg_opts[''] = sys_argvdbg_opts[''] = Mbullwinkle.BWInterface()dbg_opts[''] = ''if dbg is None:dbg = Mdebugger.Trepan(dbg_opts)dbg.core.add_ignore(main)pass_postprocess_options(dbg, opts)if len(sys_argv) == :mainpyfile = Noneelse:mainpyfile = sys_argv[] if not os.path.isfile(mainpyfile):mainpyfile=Mclifns.whence_file(mainpyfile)is_readable = Mfile.readable(mainpyfile)if is_readable is None:print(\"\"% (__title__, mainpyfile,))sys.exit()elif not is_readable:print(\"\"% (__title__, mainpyfile,))sys.exit()returnmainpyfile_noopt = Mfile.file_pyc2py(mainpyfile)if mainpyfile != mainpyfile_nooptand Mfile.readable(mainpyfile_noopt):print(\"\"% __title__)print(\"\" % (__title__, mainpyfile_noopt,))mainpyfile = mainpyfile_nooptpasssys.path[] = dbg.main_dirname = os.path.dirname(mainpyfile)dbg.sig_received = Falsewhile True:try:if dbg.program_sys_argv and mainpyfile:normal_termination = dbg.run_script(mainpyfile)if not normal_termination: breakelse:dbg.core.execution_status = ''dbg.core.processor.process_commands()passdbg.core.execution_status = ''dbg.intf[-].msg(\"\")dbg.core.processor.process_commands()except Mexcept.DebuggerQuit:breakexcept Mexcept.DebuggerRestart:dbg.core.execution_status = ''if dbg.program_sys_argv:sys.argv = list(dbg.program_sys_argv)part1 = ('' %dbg.core.filename(mainpyfile))args = ''.join(dbg.program_sys_argv[:])dbg.intf[-].msg(Mmisc.wrapped_lines(part1, args,dbg.settings['']))else: breakexcept SystemExit:breakpasssys.argv = orig_sys_argvreturn", "docstring": "Routine which gets run if we were invoked directly", "id": "f7416:m2"} {"signature": "def post_mortem(exc=None, frameno=, dbg=None):", "body": "if dbg is None:if Mdebugger.debugger_obj is None:Mdebugger.debugger_obj = Mdebugger.Trepan()passdbg = Mdebugger.debugger_objpassre_bogus_file = re.compile(\"\")if exc[] is None:exc = get_last_or_frame_exception()if exc[] is None:print(\"\"\"\")returnpassexc_type, exc_value, exc_tb = excdbg.core.execution_status = (''% exc_type)if exc_tb is not None:while exc_tb.tb_next is not None:filename = exc_tb.tb_frame.f_code.co_filenameif (dbg.mainpyfile and == len(dbg.mainpyfile)and not re_bogus_file.match(filename)):dbg.mainpyfile = filenamepassexc_tb = exc_tb.tb_nextpassdbg.core.processor.curframe = exc_tb.tb_framepassif == len(dbg.program_sys_argv):dbg.program_sys_argv = list(sys.argv[:])dbg.program_sys_argv[:] = [dbg.mainpyfile]try:f = exc_tb.tb_frameif f and f.f_lineno != exc_tb.tb_lineno : f = f.f_backdbg.core.processor.event_processor(f, '', exc, '')except DebuggerRestart:while True:sys.argv = list(dbg._program_sys_argv)dbg.msg(\"\"% (dbg.filename(dbg.mainpyfile),\"\".join(dbg._program_sys_argv[:])))try:dbg.run_script(dbg.mainpyfile)except DebuggerRestart:passpassexcept DebuggerQuit:passreturn", "docstring": "Enter debugger read loop after your program has crashed.\n\n exc is a triple like you get back from sys.exc_info. If no exc\n parameter, is supplied, the values from sys.last_type,\n sys.last_value, sys.last_traceback are used. And if these don't\n exist either we'll assume that sys.exc_info() contains what we\n want and frameno is the index location of where we want to start.\n\n 'frameno' specifies how many frames to ignore in the traceback.\n The default is 1, that is, we don't need to show the immediate\n call into post_mortem. 
If you have wrapper functions that call\n this one, you may want to increase frameno.", "id": "f7417:m3"} {"signature": "def pm(frameno=, dbg=None):", "body": "post_mortem(get_last_or_frame_exception(), frameno, dbg=dbg)return", "docstring": "Set up post-mortem debugging using the last traceback. But if\n there is no traceback, we'll assume that sys.exc_info() contains\n what we want and frameno is the index location of where we want\n to start.\n\n 'dbg', is an optional trepan.Trepan object.", "id": "f7417:m1"} {"signature": "def errmsg(self, msg):", "body": "return self.msg(msg)", "docstring": "Common routine for reporting debugger error messages.", "id": "f7418:c0:m2"} {"signature": "def close(self):", "body": "self.input.close()self.output.close()return", "docstring": "Closes both input and output", "id": "f7418:c0:m1"} {"signature": "def errmsg(self, msg, prefix=\"\"):", "body": "if not self.verbose:location = (\"\"% (self.script_name, self.input_lineno))msg = \"\" %(prefix, location, prefix, msg)else:msg = \"\" %(prefix, msg)passself.msg(msg)if self.abort_on_error:raise EOFErrorreturn", "docstring": "Common routine for reporting debugger error messages.", "id": "f7420:c0:m3"} {"signature": "def confirm(self, prompt, default):", "body": "while True:try:self.write_confirm(prompt, default)reply = self.readline('').strip().lower()except EOFError:return defaultif reply in ('', ''):return Trueelif reply in ('', ''):return Falseelse:self.msg(\"\")passpassreturn default", "docstring": "Called when a dangerous action is about to be done to make sure\n it's okay. `prompt' is printed; user response is returned.", "id": "f7421:c0:m2"} {"signature": "def state(self):", "body": "return self.inout.state", "docstring": "Return connected", "id": "f7421:c0:m11"} {"signature": "def errmsg(self, str, prefix=\"\"):", "body": "return self.msg(\"\" %(prefix, str))", "docstring": "Common routine for reporting debugger error messages.", "id": "f7421:c0:m3"} {"signature": "def msg(self, msg):", "body": "self.inout.writeline(Mcomcodes.PRINT + msg)return", "docstring": "used to write to a debugger that is connected to this\n server; `str' written will have a newline added to it", "id": "f7421:c0:m6"} {"signature": "def errmsg(self, msg, prefix=\"\"):", "body": "return self.msg(\"\" %(prefix, msg))", "docstring": "Common routine for reporting debugger error messages.", "id": "f7423:c0:m3"} {"signature": "def close(self):", "body": "try:self.input.close()self.output.close()except:passreturn", "docstring": "Closes both input and output", "id": "f7423:c0:m1"} {"signature": "def is_dark_rgb(r, g, b):", "body": "try:midpoint = int(environ.get('', None))except:passif not midpoint:term = environ.get('', None)print(\"\", midpoint, '', (* + *g + *b))midpoint = if term and term == '' else if ( (* + *g + *b) < midpoint ):return Trueelse:return False", "docstring": "Pass as parameters R G B values in hex\n On return, variable is_dark_bg is set", "id": "f7426:m1"} {"signature": "def set_default_bg():", "body": "term = environ.get('', None)if term:if (term.startswith('',) or term.startswith('')or term == ''):return Falsereturn True", "docstring": "Get bacground from\n default values based on the TERM environment variable", "id": "f7426:m0"} {"signature": "def format(self, show_enabled=True):", "body": "what = ''if show_enabled:if self.enabled:what += ''else:what += ''passpassif self.fmt:what += self.fmt + ''passwhat += self.argreturn '' % (self.number, what)", "docstring": "format display item", "id": "f7430:c1:m2"} 
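The post_mortem() and pm() records above describe how to drop into the debugger after an uncaught exception. A minimal usage sketch in Python follows; the import path and the risky() placeholder are assumptions, while the sys.exc_info() triple and the frameno default of 1 are taken from the docstrings above:

    # Sketch only: the module location is an assumption based on the record ids.
    import sys
    from trepan.post_mortem import pm, post_mortem  # assumed import path

    def risky():               # hypothetical function that raises
        return 1 / 0

    try:
        risky()
    except Exception:
        # Explicit form: pass the sys.exc_info() triple; frameno=1 (the
        # documented default) skips the immediate call into post_mortem.
        post_mortem(sys.exc_info(), frameno=1)

    # Interactive form: after an uncaught exception at a REPL, sys.last_type,
    # sys.last_value and sys.last_traceback are set, so pm() needs no arguments.
    # pm()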
{"signature": "def all(self):", "body": "found = Falses = []for display in self.list:if not found:s.append(\"\"\"\"\"\")found = Truepasss.append(display.format())return s", "docstring": "List all display items; return 0 if none", "id": "f7430:c0:m2"} {"signature": "def signature(frame):", "body": "if not frame: return Nonecode = frame.f_codereturn (code.co_name, code.co_filename, code.co_firstlineno)", "docstring": "return suitable frame signature to key display expressions off of.", "id": "f7430:m0"} {"signature": "def clear(self):", "body": "self.list = []return", "docstring": "Delete all display expressions", "id": "f7430:c0:m3"} {"signature": "def is_compiled_py(filename):", "body": "return True if filename[-:].lower() in ('', '') else False", "docstring": "Given a file name, return True if the suffix is pyo or pyc (an\noptimized bytecode file).", "id": "f7434:m1"} {"signature": "def lookupmodule(name):", "body": "if sys.modules.get(name):return (sys.modules[name], sys.modules[name].__file__)if os.path.isabs(name) and readable(name):return (None, name)f = os.path.join(sys.path[], name)if readable(f):return (None, f)root, ext = os.path.splitext(name)if ext == '':name = name + ''passif os.path.isabs(name):return (None, name)for dirname in sys.path:while os.path.islink(dirname):dirname = os.readlink(dirname)passfullname = os.path.join(dirname, name)if readable(fullname):return (None, fullname)passreturn (None, None)", "docstring": "lookupmodule()->(module, file) translates a possibly incomplete\n file or module name into an absolute file name. None can be\n returned for either of the values positions of module or file when\n no or module or file is found.", "id": "f7434:m3"} {"signature": "def checkfuncname(b, frame):", "body": "if not b.funcname:if b.line != frame.f_lineno:return Falsereturn Trueif frame.f_code.co_name != b.funcname:return Falseif not b.func_first_executable_line:b.func_first_executable_line = frame.f_linenoif b.func_first_executable_line != frame.f_lineno:return Falsereturn True", "docstring": "Check whether we should break here because of `b.funcname`.", "id": "f7435:m0"} {"signature": "def canonic_filename(self, frame):", "body": "return self.canonic(frame.f_code.co_filename)", "docstring": "Picks out the file name from `frame' and returns its\n canonic() value, a string.", "id": "f7436:c0:m3"} {"signature": "def start(self, opts=None):", "body": "try:self.trace_hook_suspend = Trueget_option = lambda key: Mmisc.option_set(opts, key,default.START_OPTS)add_hook_opts = get_option('')if not tracer.is_started() or get_option(''):tracer_start_opts = default.START_OPTS.copy()if opts:tracer_start_opts.update(opts.get('', {}))tracer_start_opts[''] = self.trace_dispatchtracer_start_opts[''] = add_hook_optstracer.start(tracer_start_opts)elif not tracer.find_hook(self.trace_dispatch):tracer.add_hook(self.trace_dispatch, add_hook_opts)passself.execution_status = ''finally:self.trace_hook_suspend = Falsereturn", "docstring": "We've already created a debugger object, but here we start\n debugging in earnest. We can also turn off debugging (but have\n the hooks suspended or not) using 'stop'.\n\n 'opts' is a hash of every known value you might want to set when\n starting the debugger. 
See START_OPTS of module default.", "id": "f7436:c0:m8"} {"signature": "def filename(self, filename=None):", "body": "if filename is None:if self.debugger.mainpyfile:filename = self.debugger.mainpyfileelse:return Noneif self.debugger.settings['']:return(os.path.basename(filename))return filename", "docstring": "Return filename or the basename of that depending on the\n basename setting", "id": "f7436:c0:m4"} {"signature": "def is_stop_here(self, frame, event, arg):", "body": "lineno = frame.f_linenofilename = frame.f_code.co_filenameif self.different_line and event == '':if self.last_lineno == lineno and self.last_filename == filename:return Falsepassself.last_lineno = linenoself.last_filename = filenameif self.stop_level is not None:if frame != self.last_frame:self.last_level = Mstack.count_frames(frame)self.last_frame = framepassif self.last_level > self.stop_level:return Falseelif self.last_level == self.stop_level andself.stop_on_finish and event in ['', '']:self.stop_level = Noneself.stop_reason = \"\"return Truepassif self._is_step_next_stop(event):self.stop_reason = ''return Truereturn False", "docstring": "Does the magic to determine if we stop here and run a\n command processor or not. If so, return True and set\n self.stop_reason; if not, return False.\n\n Determining factors can be whether a breakpoint was\n encountered, whether we are stepping, next'ing, finish'ing,\n and, if so, whether there is an ignore counter.", "id": "f7436:c0:m12"} {"signature": "def handle(self, signum, frame):", "body": "if self.print_method:self.print_method(''% self.signame)if self.print_stack:import tracebackstrings = traceback.format_stack(frame)for s in strings:if s[-] == '': s = s[:-]self.print_method(s)passpassif self.b_stop:core = self.dbgr.coreold_trace_hook_suspend = core.trace_hook_suspendcore.trace_hook_suspend = Truecore.stop_reason = ('' %(self.signame, signum))core.processor.event_processor(frame, '', signum)core.trace_hook_suspend = old_trace_hook_suspendpassif self.pass_along:if self.old_handler:self.old_handler(signum, frame)passpassreturn", "docstring": "This method is called when a signal is received.", "id": "f7437:c1:m1"} {"signature": "def lookup_signum(name):", "body": "uname = name.upper()if (uname.startswith('') and hasattr(signal, uname)):return getattr(signal, uname)else:uname = \"\"+unameif hasattr(signal, uname):return getattr(signal, uname)return Nonereturn", "docstring": "Find the corresponding signal number for 'name'. Return None\n if 'name' is invalid.", "id": "f7437:m2"} {"signature": "def handle_ignore(self, signame, set_ignore):", "body": "self.handle_pass(signame, not set_ignore)return set_ignore", "docstring": "pass' and 'noignore' are synonyms. 'nopass and 'ignore' are\n synonyms.", "id": "f7437:c0:m12"} {"signature": "def check_and_adjust_sighandler(self, signame, sigs):", "body": "signum = lookup_signum(signame)try:old_handler = signal.getsignal(signum)except ValueError:if signame in self.sigs:sigs.pop(signame)passreturn Noneif old_handler != self.sigs[signame].handle:if old_handler not in [signal.SIG_IGN, signal.SIG_DFL]:sigs[signame].old_handler = old_handlerpasstry:self._orig_set_signal(signum, self.sigs[signame].handle)except ValueError:return Falseexcept KeyError:return Falsepassreturn True", "docstring": "Check to see if a single signal handler that we are interested\nin has changed or has not been set initially. On return\nself.sigs[signame] should have our signal handler. 
True is\nreturned if the same or adjusted, False or None if error or\nnot found.", "id": "f7437:c0:m3"} {"signature": "def pprint_simple_array(val, displaywidth, msg_nocr, msg, lineprefix=''):", "body": "if type(val) != list:return Falsenumeric = Truefor i in range(len(val)):if not (type(val[i]) in [bool, float, int]):numeric = Falseif not (type(val[i]) in [bool, float, int, bytes]):return Falsepasspassmess = columnize([repr(v) for v in val],opts={\"\": True,\"\": lineprefix,\"\": int(displaywidth)-,'': not numeric})msg_nocr(mess)return True", "docstring": "Try to pretty print a simple case where a list is not nested.\n Return True if we can do it and False if not.", "id": "f7438:m1"} {"signature": "def dis(msg, msg_nocr, section, errmsg, x=None, start_line=-, end_line=None,relative_pos = False, highlight='', start_offset=, end_offset=None,include_header=False):", "body": "lasti = -if x is None:distb()return None, Noneif start_offset is None:start_offset = mess = ''if start_line > :mess += \"\" % start_lineelif start_offset > :mess = \"\" % start_offsetif end_line:mess += \"\" % end_lineelif end_offset:mess += \"\" % end_offsetsectioned = Falseif hasattr(types, '') and isinstance(x, types.InstanceType):x = x.__class__if inspect.ismethod(x):section(\"\" % (x, mess))sectioned = Truex = x.im_funcelif inspect.isfunction(x) or inspect.isgeneratorfunction(x):section(\"\" % (x, mess))x = x.func_codesectioned = Trueelif inspect.isgenerator(x):section(\"\" % (x, mess))frame = x.gi_framelasti = frame.f_last_ix = x.gi_codesectioned = Trueelif inspect.isframe(x):section(\"\" % (x, mess))sectioned = Trueif hasattr(x, ''):lasti = x.f_lastiif lasti == -: lasti = passopc = get_opcode(PYTHON_VERSION, IS_PYPY)x = x.f_codeif include_header:header_lines = Bytecode(x, opc).info().split(\"\")header = ''.join([format_token(Mformat.Comment, h) for h in header_lines])msg(header)passelif inspect.iscode(x):passif hasattr(x, ''): items = sorted(x.__dict__.items())for name, x1 in items:if isinstance(x1, _have_code):if not sectioned:section(\"\" % x)try:dis(msg, msg_nocr, section, errmsg, x1,start_line=start_line, end_line=end_line,relative_pos = relative_pos)msg(\"\")except TypeError:_, msg, _ = sys.exc_info()errmsg(\"\", msg)passpasspasspasselif hasattr(x, ''): if not sectioned:section(\"\" % x)return disassemble(msg, msg_nocr, section, x, lasti=lasti,start_line=start_line, end_line=end_line,relative_pos = relative_pos,highlight = highlight,start_offset = start_offset,end_offset = end_offset)elif isinstance(x, str): return disassemble_string(msg, msg_nocr, x,)else:errmsg(\"\" %type(x).__name__)return None, None", "docstring": "Disassemble classes, methods, functions, or code.\n\n With no argument, disassemble the last traceback.", "id": "f7440:m1"} {"signature": "def is_exec_stmt(frame):", "body": "return hasattr(frame, '') and get_call_function_name(frame) == ''", "docstring": "Return True if we are looking at an exec statement", "id": "f7441:m6"} {"signature": "def print_stack_trace(proc_obj, count=None, color='', opts={}):", "body": "if count is None:n=len(proc_obj.stack)else:n=min(len(proc_obj.stack), count)try:for i in range(n):print_stack_entry(proc_obj, i, color=color, opts=opts)except KeyboardInterrupt:passreturn", "docstring": "Print count entries of the stack trace", "id": "f7441:m9"} {"signature": "def next_opcode(code, offset):", "body": "n = len(code)while offset < n:op = code[offset]offset += if op >= HAVE_ARGUMENT:offset += passyield op, offsetpassyield -, -pass", "docstring": "Return the next opcode 
and offset as a tuple. Tuple (-100,\n -1000) is returned when reaching the end.", "id": "f7442:m2"} {"signature": "def is_class_def(line, frame):", "body": "return (line and _re_class.match(line)and stmt_contains_opcode(frame.f_code, frame.f_lineno,''))", "docstring": "Return True if we are looking at a class definition statement", "id": "f7442:m6"} {"signature": "def msg_nocr(self, msg):", "body": "self.output.write(msg)return", "docstring": "used to write to a debugger that is connected to this\n server; `str' written will not have a newline added to it", "id": "f7444:c0:m6"} {"signature": "def msg(self, msg):", "body": "if hasattr(self.output, ''):self.output.writeline(msg)elif hasattr(self.output, ''):self.output.writelines(msg + \"\")passreturn", "docstring": "used to write to a debugger that is connected to this\n server; `str' written will have a newline added to it", "id": "f7444:c0:m5"} {"signature": "def _populate_cmd_lists(self):", "body": "self.commands = {}for cmd_instance in self.cmd_instances:cmd_name = cmd_instance.nameself.commands[cmd_name] = cmd_instancepassreturn", "docstring": "Populate self.commands", "id": "f7447:c0:m15"} {"signature": "def setup(self):", "body": "self.forget()if self.settings(''):self.frame = inspect.currentframe()passif self.event in ['', '']:exc_type, exc_value, exc_traceback = self.event_argelse:_, _, exc_traceback = (None, None, None,) passif self.frame or exc_traceback:self.stack, self.curindex =get_stack(self.frame, exc_traceback, None, self)self.curframe = self.stack[self.curindex][]self.thread_name = Mthread.current_thread_name()else:self.stack = self.curframe =self.botframe = Nonepassif self.curframe:self.list_lineno =max(, inspect.getlineno(self.curframe))else:self.list_lineno = Nonepassreturn False", "docstring": "Initialization done before entering the debugger-command\n loop. In particular we set up the call stack used for local\n variable lookup and frame/up/down commands.\n\n We return True if we should NOT enter the debugger-command\n loop.", "id": "f7447:c0:m12"} {"signature": "def run(self, args):", "body": "raise NotImplementedError(NotImplementedMessage)", "docstring": "The method that implements the debugger command.\n Help on the command comes from the docstring of this method.", "id": "f7449:c0:m4"} {"signature": "def nothread_quit(self, arg):", "body": "self.debugger.core.stop()self.debugger.core.execution_status = ''self.proc.response[''] = ''self.proc.response[''] = ''self.proc.intf[-].msg(self.proc.response)raise Mexcept.DebuggerQuit", "docstring": "quit command when there's just one thread.", "id": "f7450:c0:m0"} {"signature": "def run_call(func, debug_opts=None, start_opts=None, *args, **kwds):", "body": "dbg = Mdebugger.Trepan(opts=debug_opts)try:return dbg.run_call(func, start_opts, *args, **kwds)except:Mpost_mortem.uncaught_exception(dbg)passreturn", "docstring": "Call the function (a function or method object, not a string)\n with the given arguments starting with the statement subsequent to\n the place that this appears in your program.\n\n When run_call() returns, it returns whatever the function call\n returned. 
The debugger prompt appears as soon as the function is\n entered.", "id": "f7455:m2"} {"signature": "def file2module(filename):", "body": "basename = osp.basename(filename)if '' in basename:pos = basename.rfind('')return basename[:pos]else:return basenamereturn None", "docstring": "Given a file name, extract the most likely module name.", "id": "f7456:m1"} {"signature": "def whence_file(py_script, dirnames=None):", "body": "if py_script.find(os.sep) != -:return py_scriptif dirnames is None:dirnames = os.environ[''].split(os.pathsep)for dirname in dirnames:py_script_try = osp.join(dirname, py_script)if osp.exists(py_script_try):return py_script_tryreturn py_script", "docstring": "Do a shell-like path lookup for py_script and return the results.\n If we can't find anything return py_script", "id": "f7456:m3"} {"signature": "def msg(self, msg, opts={}):", "body": "return(self.intf[-].msg(msg))", "docstring": "Convenience short-hand for self.debugger.intf[-1].msg", "id": "f7457:c0:m2"} {"signature": "def grouper(iterable, n, fillvalue=None):", "body": "return list(zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue))", "docstring": "Group iterable by n elements.\n\n >>> for t in grouper('abcdefg', 3, fillvalue='x'):\n ... print(''.join(t))\n abc\n def\n gxx", "id": "f7460:m0"} {"signature": "def _compute_missing_rates(self, currency):", "body": "rates = self._rates[currency]tmp = defaultdict(lambda: [None, None])for date in sorted(rates):rate = rates[date]if rate is not None:closest_rate = ratedist = else:dist += tmp[date][] = closest_rate, distfor date in sorted(rates, reverse=True):rate = rates[date]if rate is not None:closest_rate = ratedist = else:dist += tmp[date][] = closest_rate, distfor date in sorted(tmp):(r0, d0), (r1, d1) = tmp[date]rates[date] = (r0 * d1 + r1 * d0) / (d0 + d1)if self.verbose:print(('''').format(currency, date, r0, d0, r1, d1))", "docstring": "Fill missing rates of a currency.\n\n This is done by linear interpolation of the two closest available rates.\n\n :param str currency: The currency to fill missing rates for.", "id": "f7462:c1:m5"} {"signature": "def _get_rate(self, currency, date):", "body": "if currency == self.ref_currency:return if date not in self._rates[currency]:first_date, last_date = self.bounds[currency]if not self.fallback_on_wrong_date:raise RateNotFoundError(''.format(date, currency, first_date, last_date))if date < first_date:fallback_date = first_dateelif date > last_date:fallback_date = last_dateelse:raise AssertionError('')if self.verbose:print(r''.format(date, currency, first_date, last_date, fallback_date))date = fallback_daterate = self._rates[currency][date]if rate is None:raise RateNotFoundError(''.format(currency, date))return rate", "docstring": "Get a rate for a given currency and date.\n\n :type date: datetime.date\n\n >>> from datetime import date\n >>> c = CurrencyConverter()\n >>> c._get_rate('USD', date=date(2014, 3, 28))\n 1.375...\n >>> c._get_rate('BGN', date=date(2010, 11, 21))\n Traceback (most recent call last):\n RateNotFoundError: BGN has no rate for 2010-11-21", "id": "f7462:c1:m6"} {"signature": "def _set_missing_to_none(self, currency):", "body": "rates = self._rates[currency]first_date, last_date = self.bounds[currency]for date in list_dates_between(first_date, last_date):if date not in rates:rates[date] = Noneif self.verbose:missing = len([r for r in itervalues(rates) if r is None])if missing:print(''.format(currency, missing, first_date, last_date, + (last_date - first_date).days))", "docstring": "Fill missing 
rates of a currency with the closest available ones.", "id": "f7462:c1:m4"} {"signature": "def _format_lazy(value):", "body": "args = value._proxy____argskw = value._proxy____kwif not kw and len(args) == and isinstance(args[], six.string_types):return LiteralStr(u''.format(repr(value._proxy____args[])))return value", "docstring": "Expand a _(\"TEST\") call to something meaningful.", "id": "f7468:m9"} {"signature": "def _format_object(object):", "body": "attrs = iter(object.__dict__.items())if object.__class__:attrs = chain(attrs, iter(object.__class__.__dict__.items()))is_model = isinstance(object, Model)is_form = isinstance(object, BaseForm)attrs = dict((k, v)for k, v in attrsif not k.startswith('')and not getattr(v, '', False)and not (is_model and k in ('', ''))and not (is_form and k in ('',)))for member in dir(object):try:if member.startswith('') or not hasattr(object, member):continueexcept HANDLED_EXCEPTIONS as e:attrs[member] = _format_exception(e)continuevalue = getattr(object, member)if callable(value) or member in attrs or getattr(value, '', False):continueattrs[member] = valuefor name, value in list(attrs.items()): if isinstance(value, property):attrs[name] = _try_call(lambda: getattr(object, name))elif isinstance(value, types.FunctionType):if PY3:spec = inspect.getfullargspec(value)else:spec = inspect.getargspec(value)if len(spec.args) == or len(spec.args) == len(spec.defaults or ()) + :if _is_unsafe_name(name):attrs[name] = LiteralStr('')else:attrs[name] = _try_call(lambda: value(object))else:del attrs[name]elif hasattr(value, ''):attrs[name] = value = _try_call(lambda: getattr(object, name), return_exceptions=True)if isinstance(value, Manager):attrs[name] = LiteralStr(''.format(value.__class__.__name__))elif isinstance(value, AttributeError):del attrs[name] elif isinstance(value, HANDLED_EXCEPTIONS):attrs[name] = _format_exception(value)if getattr(object, '', None) is not object.__str__:attrs[''] = _try_call(lambda: smart_str(object))elif getattr(object, '', None) is not object.__unicode__:attrs[''] = _try_call(lambda: smart_str(object))if hasattr(object, ''):attrs[''] = LiteralStr('')if hasattr(object, ''):attrs[''] = LiteralStr('')if hasattr(object, ''):attrs[''] = LiteralStr('')if hasattr(object, ''):attrs[''] = len(object)if isinstance(object, BaseForm):for field_name in list(object.fields.keys()):attrs[field_name] = object[field_name]del attrs['']return _format_dict(attrs)", "docstring": "# Instead of just printing , expand the fields.", "id": "f7468:m4"} {"signature": "def pformat_django_context_html(object):", "body": "if isinstance(object, QuerySet):text = ''lineno = for item in object.all()[:]:lineno += if lineno >= :text += u''breaktext += u''.format(escape(repr(item)))return textelif isinstance(object, Manager):return mark_safe(u'')elif isinstance(object, six.string_types):return escape(repr(object))elif isinstance(object, Promise):return escape(_format_lazy(object))elif isinstance(object, dict):return _format_dict(object)elif isinstance(object, list):return _format_list(object)elif hasattr(object, ''):return _format_object(object)else:text = DebugPrettyPrinter(width=).pformat(object)return _style_text(text)", "docstring": "Dump a variable to a HTML string with sensible output for template context fields.\nIt filters out all fields which are not usable in a template context.", "id": "f7468:m1"} {"signature": "def _format(self, object, stream, indent, allowance, context, level):", "body": "try:PrettyPrinter._format(self, object, stream, indent, allowance, context, 
level)except Exception as e:stream.write(_format_exception(e))", "docstring": "Recursive part of the formatting", "id": "f7468:c1:m2"} {"signature": "def _try_call(func, extra_exceptions=(), return_exceptions=False):", "body": "try:return func()except HANDLED_EXCEPTIONS as e:if return_exceptions:return eelse:return _format_exception(e)except extra_exceptions as e:if return_exceptions:return eelse:return _format_exception(e)", "docstring": "Call a method, but\n:param func:\n:type func:\n:param extra_exceptions:\n:type extra_exceptions:\n:return:\n:rtype:", "id": "f7468:m12"} {"signature": "def get_used_template(response):", "body": "if not hasattr(response, ''):return None, Nonetemplate = response.template_nameif template is None:return None, Noneif isinstance(template, (list, tuple)):if len(template) == :return template[], Noneelse:used_name = _get_used_template_name(template)return used_name, templateelif isinstance(template, six.string_types):return template, Noneelse:filename = _get_template_filename(template)template_name = ''.format(filename) if filename else ''return template_name, None", "docstring": "Get the template used in a TemplateResponse.\nThis returns a tuple of \"active choice, all choices\"", "id": "f7471:m3"} {"signature": "def _get_view_data(self, context_data):", "body": "view = context_data.get('')if not isinstance(view, View):view = Nonetemplate_context = []for key, obj in context_data.items():if isinstance(obj, (BaseForm, BaseFormSet, Model)):template_context.append((key, _format_path(obj.__class__)))return {'': _get_view_model(view),'': _get_form_class(view),'': template_context,}", "docstring": "Extract the used view from the TemplateResponse context (ContextMixin)", "id": "f7473:c0:m3"} {"signature": "def csv(self, filename=None, **format_params):", "body": "if not self.pretty:return None if filename:outfile = open(filename, '')else:outfile = StringIO()writer = UnicodeWriter(outfile, **format_params)writer.writerow(self.field_names)for row in self:writer.writerow(row)if filename:outfile.close()return CsvResultDescriptor(filename)else:return outfile.getvalue()", "docstring": "Generates results in comma-separated form. Write to ``filename``\n if given. Any other parameter will be passed on to ``csv.writer``.\n\n :param filename: if given, the CSV will be written to filename.\n\n Any additional keyword arguments will be passsed\n through to ``csv.writer``.", "id": "f7480:c2:m12"} {"signature": "def plot(self, title=None, **kwargs):", "body": "if not plt:raise ImportError(\"\")self.guess_plot_columns()self.x = self.x or range(len(self.ys[]))coords = reduce(operator.add, [(self.x, y) for y in self.ys])plot = plt.plot(*coords, **kwargs)if hasattr(self.x, ''):plt.xlabel(self.x.name)ylabel = \"\".join(y.name for y in self.ys)plt.title(title or ylabel)plt.ylabel(ylabel)return plot", "docstring": "Generates a pylab plot from the result set.\n\n ``matplotlib`` must be installed, and in an\n IPython Notebook, inlining must be on::\n\n %%matplotlib inline\n\n The first and last columns are taken as the X and Y\n values. 
Any columns between are ignored.\n\n :param title: plot title, defaults to names of Y value columns\n\n Any additional keyword arguments will be passsed\n through to ``matplotlib.pylab.plot``.", "id": "f7480:c2:m10"} {"signature": "def __str__(self, *arg, **kwarg):", "body": "return str(self.pretty or '')", "docstring": "String representation of a ``ResultSet``.", "id": "f7480:c2:m2"} {"signature": "def interpret_stats(results):", "body": "stats = results.statscontains_updates = stats.pop(\"\", False) if stats else Falseif not contains_updates:result = ''.format(len(results))else:result = ''for stat, value in stats.items():if value:result = \"\".format(result, value,stat.replace(\"\", \"\"))return result.strip()", "docstring": "Generates the string to be shown as updates after the execution of a\n Cypher query\n\n :param results: ``ResultSet`` with the raw results of the execution of\n the Cypher query", "id": "f7480:m1"} {"signature": "def draw(self, directed=True, layout=\"\",node_label_attr=None, show_node_labels=True,edge_label_attr=None, show_edge_labels=True,node_size=, node_color='', node_alpha=,node_text_size=,edge_color='', edge_alpha=, edge_tickness=,edge_text_pos=,text_font='', ax=None):", "body": "graph = self.get_graph(directed=directed)pos = getattr(nx, \"\".format(layout))(graph)node_labels = {}edge_labels = {}node_colors = set()if show_node_labels:for node, props in graph.nodes(data=True):labels = props.pop('', [])for label in labels:node_colors.add(label)if node_label_attr is None:node_labels[node] = \"\".format(\"\".join(labels),next(iter(props.values())) if props else \"\",)else:props_list = [\"\".format(k, v)for k, v in props.items()]node_labels[node] = \"\".format(\"\".join(labels), \"\".join(props_list))node_color = []node_colors = list(node_colors)legend_colors = []colors = list(plt.matplotlib.colors.ColorConverter().cache.items())[:]for _, color_rgb in colors[:len(node_colors)]:node_color.append(color_rgb)legend_colors.append(color_rgb)if show_edge_labels:for start, end, props in graph.edges(data=True):if edge_label_attr is None:edge_label = props.get(\"\", '')else:edge_label = props.get(edge_label_attr, '')edge_labels[(start, end)] = edge_labelif not ax:fig = plt.figure()ax = fig.add_subplot()nodes = nx.draw_networkx_nodes(graph, pos=pos, node_color=node_color,node_size=node_size, alpha=node_alpha,ax=ax)nx.draw_networkx_labels(graph, pos=pos, labels=node_labels,font_size=node_text_size,font_family=text_font,ax=ax)nx.draw_networkx_edges(graph, pos=pos, width=edge_tickness,alpha=edge_alpha, edge_color=edge_color,ax=ax)nx.draw_networkx_edge_labels(graph, pos=pos, edge_labels=edge_labels,ax=ax)ax.legend([plt.Line2D([], [], linestyle=\"\", marker=\"\",alpha=node_alpha,markersize=, markerfacecolor=color)for color in legend_colors],node_colors, loc=(-, ), numpoints=, frameon=False)ax.set_axis_off()return graph, ax, nodes", "docstring": "Plot of a NetworkX multi-graph instance\n\n :param directed: boolean, optional (default=`True`).\n Whether to return a directed graph or not.\n :param layout: string, optional (default=`\"spring\"`).\n Layout to apply. 
Any of the possible NetworkX layouts will work:\n ``'circular_layout'``, ``'random_layout'``, ``'shell_layout'``,\n ``'spring_layout'``, ``'spectral_layout'``,\n or ``'fruchterman_reingold_layout'``.\n :param node_label_attr: string, optional (default=`None`).\n Attribute of the nodes that has to be used as the label.\n :param show_node_labels: boolean, optional (default=`True`).\n Whether to show or not the labels of the nodes.\n :param edge_label_attr: boolean, optional (default=`None`).\n Attribute of the edges that has to be used as the label.\n :param show_edge_labels: . optional (default=`True`).\n Whether to show or not the labels of the edges.\n :param node_size: integer, optional (default=`1600`).\n Desired size for nodes.\n :param node_color: color string, or array of floats, (default=`'blue'`)\n Node color. Can be a single color format string, or a sequence of\n colors with the same length as nodelist. If numeric values are\n specified they will be mapped to colors using the ``cmap`` and\n ``vmin``, ``vmax`` parameters. See ``matplotlib.scatter`` for more\n details.\n :param node_alpha: float, optional (default=`0.3`).\n Between 0 and 1 for transparency of nodes.\n :param node_text_size: integer, optional (default=`12`).\n Size of the node text.\n :param edge_color: color string, or array of floats (default=`'blue'`)\n Edge color. Can be a single color format string, or a sequence of\n colors with the same length as edgelist. If numeric values are\n specified they will be mapped to colors using the ``edge_cmap`` and\n ``edge_vmin``, ``edge_vmax`` parameters.\n :param edge_alpha: float, optional (default=`0.3`)\n Transparency for thee edges.\n :param edge_tickness: float or integer, optional (default=`1`).\n Thickness of the lines drawn for the edges.\n :param edge_text_pos: . Default to optional (d0)=\n :param text_font: . Default to optional (default=`'sans-serif'`).\n :param ax: ``matplotlib.Figure``, optional (default=`None`).\n A ``matplotlib.Figure`` to use when rendering the graph. If `None`,\n a new object is created and returned.----\n\n :return: a ``matplotlib.Figure`` with the graph rendered.", "id": "f7480:c2:m8"} {"signature": "def clear(self):", "body": "with self._lock:try:del self._data_store[:]except Exception:pass", "docstring": "Clears all data associated with the mappers data store", "id": "f7503:c0:m7"} {"signature": "def s_add(self, path, function, method=None, type_cast=None):", "body": "with self._lock:try:path = ''.format(path.lstrip(''))path = ''.format(path.rstrip(''))path = path.replace('', '')path = path.replace('>', '')self.add(path, function, method, type_cast)except Exception:pass", "docstring": "Function for registering a simple path.\n\n Args:\n path (str): Path to be matched.\n function (function): Function to associate with this path.\n method (str, optional): Usually used to define one of GET, POST,\n PUT, DELETE. You may use whatever fits your situation though.\n Defaults to None.\n type_cast (dict, optional): Mapping between the param name and\n one of `int`, `float` or `bool`. 
The value reflected by the\n provided param name will than be casted to the given type.\n Defaults to None.", "id": "f7503:c0:m6"} {"signature": "def __init__(self):", "body": "self._lock = threading.RLock()self._data_store = list()", "docstring": "Create a new mapper instance.\n Using `Mapper.get()`_ is the prefered way.", "id": "f7503:c0:m0"} {"signature": "@propertydef first_item_number(self):", "body": "return self.offset + ", "docstring": ":return: The first \"item number\", used when displaying messages to the user\nlike \"Displaying items 1 to 10 of 123\" - in this example 1 would be returned", "id": "f7518:c0:m14"} {"signature": "def get_full_page_url(self, page_number, scheme=None):", "body": "args = dict(request.view_args,_external=True,)if scheme is not None:args[''] = schemeif page_number != :args[''] = page_numberreturn url_for(request.endpoint, **args)", "docstring": "Get the full, external URL for this page, optinally with the passed in URL scheme", "id": "f7518:c0:m9"} {"signature": "def format_commas(number):", "body": "if number is None:return Nonereturn ''.format(number)", "docstring": "Rounds a number and formats it with commas i.e. 123,456,789", "id": "f7519:m10"} {"signature": "def is_uk_mobile_number(phone_number):", "body": "stripped = phone_number.replace('', '')return stripped.startswith('') or stripped.startswith('')", "docstring": "Is the passed in phone number a uk mobile number", "id": "f7519:m29"} {"signature": "def format_phone_number(phone_number):", "body": "if phone_number is None:return Nonereturn PHONE_NUMBER_DISALLOWED_SYMBOLS.sub('', phone_number)", "docstring": "Removes unnecessary symbols from phone numbers", "id": "f7519:m15"} {"signature": "def make_csv_response(csv_data, filename):", "body": "resp = make_response(csv_data)resp.headers[''] = ''resp.headers[''] = '' % filenamereturn resp", "docstring": ":param csv_data: A string with the contents of a csv file in it\n:param filename: The filename that the file will be saved as when it is downloaded by the user\n:return: The response to return to the web connection", "id": "f7519:m18"} {"signature": "def remove_namespaces(root):", "body": "for elem in root.getiterator():if not hasattr(elem.tag, ''):continuei = elem.tag.find('')if i >= :elem.tag = elem.tag[i + :]objectify.deannotate(root, cleanup_namespaces=True)", "docstring": "Call this on an lxml.etree document to remove all namespaces", "id": "f7520:m0"} {"signature": "def add_months_to_date(months, date):", "body": "month = date.monthnew_month = month + monthsyears = while new_month < :new_month += years -= while new_month > :new_month -= years += year = date.year + yearstry:return datetime.date(year, new_month, date.day)except ValueError:if months > :new_month += if new_month > :new_month -= year += return datetime.datetime(year, new_month, )else:new_day = calendar.monthrange(year, new_month)[]return datetime.datetime(year, new_month, new_day)", "docstring": "Add a number of months to a date", "id": "f7521:m20"} {"signature": "def get_enable_celery_error_reporting_function(site_name, from_address):", "body": "def enable_celery_email_logging(sender, signal, logger, loglevel, logfile, format, colorize, **kwargs):from celery import current_applog.info(''.format(logger.name))send_errors = current_app.conf['']send_warnings = current_app.conf['']if send_errors or send_warnings:error_email_subject = ''.format(site_name)celery_handler = CeleryEmailHandler(from_address, current_app.conf[''],error_email_subject)if 
send_warnings:celery_handler.setLevel(logging.WARNING)else:celery_handler.setLevel(logging.ERROR)logger.addHandler(celery_handler)return enable_celery_email_logging", "docstring": "Use this to enable error reporting. You need to put the following in your tasks.py or wherever you\nwant to create your celery instance:\n\ncelery = Celery(__name__)\n\nenable_celery_email_logging = get_enable_celery_error_reporting_function('My Website [LIVE]', 'errors@mywebsite.com')\nafter_setup_logger.connect(enable_celery_email_logging)\nafter_setup_task_logger.connect(enable_celery_email_logging)", "id": "f7522:m0"} {"signature": "def init_celery(app, celery):", "body": "celery.conf.update(app.config)TaskBase = celery.Taskclass ContextTask(TaskBase):abstract = Truedef __call__(self, *args, **kwargs):with app.app_context():return TaskBase.__call__(self, *args, **kwargs)celery.Task = ContextTaskreturn celery", "docstring": "Initialise Celery and set up logging\n\n:param app: Flask app\n:param celery: Celery instance", "id": "f7522:m1"} {"signature": "def resize_crop_image(image, dest_w, dest_h, pad_when_tall=False):", "body": "dest_w = float(dest_w)dest_h = float(dest_h)dest_ratio = dest_w / dest_hsrc_w = float(image.size[])src_h = float(image.size[])src_ratio = src_w / src_hif src_ratio < dest_ratio:scale = dest_w / src_wscaled_w = dest_wscaled_h = src_h * scaleleft = right = dest_wtop = (scaled_h - dest_h) / bottom = top + dest_helse:scale = dest_h / src_hscaled_h = dest_hscaled_w = src_w * scaleleft = (scaled_w - dest_w) / right = left + dest_wtop = bottom = dest_hif pad_when_tall:if (bottom - top) < (scaled_h * ):log.info('')return resize_pad_image(image, dest_w, dest_h)if src_w > dest_w or src_h > dest_h:scaled_image = image.resize((int(scaled_w), int(scaled_h)), PIL.Image.ANTIALIAS)cropped_image = scaled_image.crop((int(left), int(top), int(right), int(bottom)))return cropped_imageelif scaled_w < src_w or scaled_h < src_h:cropped_image = image.crop((int(left), int(top), int(right), int(bottom)))return cropped_imageelse:return image", "docstring": ":param image: PIL.Image\n:param dest_w: Target width\n:param dest_h: Target height\n:return: Scaled and cropped image", "id": "f7523:m1"} {"signature": "def resize_image_to_fit_height(image, dest_h):", "body": "scale_factor = dest_h / image.size[]dest_w = image.size[] * scale_factorscaled_image = image.resize((int(dest_w), int(dest_h)), PIL.Image.ANTIALIAS)return scaled_image", "docstring": "Resize and image to fit the passed in height, keeping the aspect ratio the same\n\n:param image: PIL.Image\n:param dest_h: The desired height", "id": "f7523:m4"} {"signature": "def resize_image_to_fit(image, dest_w, dest_h):", "body": "dest_w = float(dest_w)dest_h = float(dest_h)dest_ratio = dest_w / dest_hsrc_w = float(image.size[])src_h = float(image.size[])src_ratio = src_w / src_hif src_ratio < dest_ratio:scale = dest_h / src_hscaled_h = dest_hscaled_w = src_w * scaleelse:scale = dest_w / src_wscaled_w = dest_wscaled_h = src_h * scalescaled_image = image.resize((int(scaled_w), int(scaled_h)), PIL.Image.ANTIALIAS)return scaled_image", "docstring": "Resize the image to fit inside dest rectangle. 
Resultant image may be smaller than target\n:param image: PIL.Image\n:param dest_w: Target width\n:param dest_h: Target height\n:return: Scaled image", "id": "f7523:m0"} {"signature": "def resize_image_to_fit_width(image, dest_w):", "body": "scale_factor = dest_w / image.size[]dest_h = image.size[] * scale_factorscaled_image = image.resize((int(dest_w), int(dest_h)), PIL.Image.ANTIALIAS)return scaled_image", "docstring": "Resize and image to fit the passed in width, keeping the aspect ratio the same\n\n:param image: PIL.Image\n:param dest_w: The desired width", "id": "f7523:m3"} {"signature": "def file_md5sum(filename):", "body": "hash_md5 = hashlib.md5()with open(filename, '') as f:for chunk in iter(lambda: f.read( * ), b''):hash_md5.update(chunk)return hash_md5.hexdigest()", "docstring": ":param filename: The filename of the file to process\n:returns: The MD5 hash of the file", "id": "f7525:m3"} {"signature": "def queue_email(to_addresses, from_address, subject, body, commit=True, html=True, session=None):", "body": "from models import QueuedEmailif session is None:session = _db.sessionlog.info('' % (to_addresses, subject))queued_email = QueuedEmail(html, to_addresses, from_address, subject, body, STATUS_QUEUED)session.add(queued_email)session.commit()return queued_email", "docstring": "Add a mail to the queue to be sent.\n\nWARNING: Commits by default!\n\n:param to_addresses: The names and addresses to send the email to, i.e. \"Steve, info@fig14.com\"\n:param from_address: Who the email is from i.e. \"Stephen Brown \"\n:param subject: The email subject\n:param body: The html / text body of the email\n:param commit: Whether to commit to the database\n:param html: Is this a html email?\n:param session: The sqlalchemy session or None to use db.session", "id": "f7527:m1"} {"signature": "def init(db, queued_email_model_class):", "body": "global _db, _QueuedEmail_db = db_QueuedEmail = queued_email_model_class", "docstring": ":param db: Flask-SQLAlchemy database object\n:param queued_email_model_class: Model class (see above example)", "id": "f7527:m0"} {"signature": "def blend_html_colour_to_white(html_colour, alpha):", "body": "html_colour = html_colour.upper()has_hash = Falseif html_colour[] == '':has_hash = Truehtml_colour = html_colour[:]r_str = html_colour[:]g_str = html_colour[:]b_str = html_colour[:]r = int(r_str, )g = int(g_str, )b = int(b_str, )r = int(alpha * r + ( - alpha) * )g = int(alpha * g + ( - alpha) * )b = int(alpha * b + ( - alpha) * )out = ''.format(r, g, b)if has_hash:out = '' + outreturn out", "docstring": ":param html_colour: Colour string like FF552B or #334455\n:param alpha: Alpha value\n:return: Html colour alpha blended onto white", "id": "f7532:m3"} {"signature": "def print_location(**kwargs):", "body": "stack = inspect.stack()[]debug_print(''.format(stack[], stack[], stack[]))for k, v in kwargs.items():lesser_debug_print(''.format(k, v))", "docstring": ":param kwargs: Pass in the arguments to the function and they will be printed too!", "id": "f7535:m4"} {"signature": "def service(self):", "body": "with self.lock:for key in list(self.attempts.keys()):log.debug('' % key)if key in self.attempts:if self.attempts[key] <= :del self.attempts[key]else:self.attempts[key] -= now = datetime.datetime.utcnow()for key in list(self.locks.keys()):if key in self.locks and self.locks[key] < now:log.info('' % key)del self.locks[key]", "docstring": "Decrease the countdowns, and remove any expired locks. 
Should be called once every seconds.", "id": "f7539:c0:m3"} {"signature": "def _fetch_index_package_info(self, package_name, current_version):", "body": "try:package_canonical_name = package_nameif self.PYPI_API_TYPE == '':package_canonical_name = canonicalize_name(package_name)response = requests.get(self.PYPI_API_URL.format(package=package_canonical_name), timeout=)except HTTPError as e: return False, e.messageif not response.ok: return False, ''.format(response.reason)if self.PYPI_API_TYPE == '':return self._parse_pypi_json_package_info(package_name, current_version, response)elif self.PYPI_API_TYPE == '':return self._parse_simple_html_package_info(package_name, current_version, response)else: raise NotImplementedError('')", "docstring": ":type package_name: str\n:type current_version: version.Version", "id": "f7549:c0:m4"} {"signature": "def _parse_simple_html_package_info(self, package_name, current_version, response):", "body": "pattern = r''.format(name=re.escape(package_name))versions_match = re.findall(pattern, response.content.decode(''), flags=re.IGNORECASE)all_versions = [version.parse(vers) for vers in versions_match]filtered_versions = [vers for vers in all_versions if not vers.is_prerelease and not vers.is_postrelease]if not filtered_versions: return False, ''latest_version = max(filtered_versions)if self._prerelease or current_version.is_postrelease or current_version.is_prerelease:prerelease_versions = [vers for vers in all_versions if vers.is_prerelease or vers.is_postrelease]if prerelease_versions:latest_version = max(prerelease_versions)return {'': package_name,'': current_version,'': latest_version,'': current_version < latest_version,'': ''}, ''", "docstring": ":type package_name: str\n:type current_version: version.Version\n:type response: requests.models.Response", "id": "f7549:c0:m7"} {"signature": "def init_app(self, app):", "body": "app.config.setdefault('', '')app.config.setdefault('', None)app.config.setdefault('', None)app.config.setdefault('', None)app.config.setdefault('', )app.config.setdefault('', None)app.config.setdefault('', )app.config.setdefault('', None)app.config.setdefault('', True)app.config.setdefault('', '')app.config.setdefault('', None)app.config.setdefault('', None)if hasattr(app, ''):app.teardown_appcontext(self.teardown)", "docstring": "Initialize the `app` for use with this\n :class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.", "id": "f7552:c0:m1"} {"signature": "def get_template(name):", "body": "text = re.sub(r'', r'', name)text = re.sub(r'', r'', text)return text", "docstring": "Still unsure about best way to do this, hence cruft.", "id": "f7568:m0"} {"signature": "def plot(self, fmt=None):", "body": "for d in self.__list:d.plot(fmt=fmt)return None", "docstring": "Make a simple plot of the legend.\n\nSimply calls Decor.plot() on all of its members.\n\nTODO: Build a more attractive plot.", "id": "f7569:c2:m24"} {"signature": "def get_colour(self, c, default='', match_only=None):", "body": "return self.getattr(c=c,attr='',default=default,match_only=match_only)", "docstring": "Get the display colour of a component. 
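Both PyPI records above (f7549:c0:m4 and m7) lean on the `packaging` library's version objects to filter pre-/post-releases and pick the newest release. A minimal, self-contained illustration of that comparison logic (not the plugin itself):

from packaging import version

all_versions = [version.parse(v) for v in ("1.9", "1.10", "2.0rc1", "1.8.post1")]
stable = [v for v in all_versions if not v.is_prerelease and not v.is_postrelease]
latest = max(stable)                           # Version('1.10'): numeric, not lexicographic, ordering
print(latest, version.parse("1.2") < latest)   # 1.10 True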
Wraps `getattr()`.\n\nDevelopment note:\n Cannot define this as a `partial()` because I want\n to maintain the order of arguments in `getattr()`.\n\nArgs:\n c (component): The component to look up.\n default (str): The colour to return in the event of no match.\n match_only (list of str): The component attributes to include in the\n comparison. Default: All of them.\n\nReturns:\n str. The hex string of the matching Decor in the Legend.", "id": "f7569:c2:m21"} {"signature": "def _repr_html_(self):", "body": "all_keys = list(set(itertools.chain(*[d.keys for d in self])))rows = ''for decor in self:th, tr = decor._repr_html_row_(keys=all_keys)rows += ''.format(tr)header = ''.format(th)html = ''.format(header, rows)return html", "docstring": "Jupyter Notebook magic repr function.", "id": "f7569:c2:m11"} {"signature": "@classmethoddef builtin_timescale(cls, name):", "body": "names = {'': TIMESCALE__ISC,'': TIMESCALE__USGS_ISC,'': TIMESCALE__DNAG,}return cls.from_csv(text=names[name.lower()])", "docstring": "Generate a default timescale legend. No arguments.\n\nReturns:\n Legend: The timescale stored in `defaults.py`.", "id": "f7569:c2:m13"} {"signature": "@propertydef span(self):", "body": "return self.lower, self.upper", "docstring": "Property. A tuple of lower, upper. Provided for convenience.", "id": "f7571:c2:m8"} {"signature": "@propertydef z(self):", "body": "return self.__dict__.get('', (self.upper + self.lower)/)", "docstring": "Property. Guaranteed to give the 'middle' depth, either as defined,\nor the average of the upper and lower bounds.", "id": "f7571:c2:m6"} {"signature": "def __hash__(self):", "body": "return hash(frozenset(self.__dict__.keys()))", "docstring": "If we define __eq__ we also need __hash__ otherwise the object\nbecomes unhashable. All this does is hash the frozenset of the\nkeys. (You can only hash immutables.)", "id": "f7576:c1:m11"} {"signature": "def _repr_html_(self):", "body": "rows = ''s = ''for k, v in self.__dict__.items():rows += s.format(k=k, v=v)html = ''.format(rows)return html", "docstring": "Jupyter Notebook magic repr function.", "id": "f7576:c1:m13"} {"signature": "@classmethoddef from_text(cls, text, lexicon, required=None, first_only=True):", "body": "component = lexicon.get_component(text, first_only=first_only)if required and (required not in component):return Noneelse:return cls(component)", "docstring": "Generate a Component from a text string, using a Lexicon.\n\nArgs:\n text (str): The text string to parse.\n lexicon (Lexicon): The dictionary to use for the\n categories and lexemes.\n required (str): An attribute that we must have. 
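The __hash__ record above (f7576:c1:m11) notes that defining __eq__ makes a class unhashable unless __hash__ is also supplied. A stand-alone illustration of that pairing; only the frozenset-of-keys hash comes from the record, the __eq__ body here is an assumption.

class Component(object):
    def __init__(self, properties):
        for k, v in properties.items():
            setattr(self, k, v)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __hash__(self):
        # Hash only the (immutable) set of attribute names, as the corpus entry does.
        return hash(frozenset(self.__dict__.keys()))

# Same keys, different values: equal hashes are allowed, equality still differs.
print(hash(Component({'lithology': 'sand'})) == hash(Component({'lithology': 'mud'})))  # True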
If a required\n attribute is missing from the component, then None is returned.\n first_only (bool): Whether to only take the first\n match of a lexeme against the text string.\n\nReturns:\n Component: A Component object, or None if there was no\n must-have field.", "id": "f7576:c1:m15"} {"signature": "def _process_row(text, columns):", "body": "if not text:returncoldict = {k: {'': s,'': l,'': r,'': w} for k, (s, l, r, w) in columns.items()}item = {}for field in coldict:value = _get_field(text, coldict, field)if value is not None:item[field] = valuereturn item", "docstring": "Processes a single row from the file.", "id": "f7578:m4"} {"signature": "def _combine(self, old_self, other, blend=True):", "body": "if blend:self.components = old_self.components.copy()for c in other.components:if c not in self.components:self.components.append(c)self.description = old_self._blend_descriptions(other)self.data = old_self._combine_data(other)else:self.components = other.componentsself.description = other.descriptionself.data = other.datareturn self", "docstring": "Private method. Combines data, components, and descriptions but\nnothing else.\n\nArgs:\n old_self (Interval): You have to pass the instance explicitly.\n other (Interval): The other Interval.\n blend (bool): Whether to blend or not.\n\nReturns:\n Interval. The combined description.", "id": "f7579:c1:m25"} {"signature": "@propertydef thickness(self):", "body": "return abs(self.base.z - self.top.z)", "docstring": "Returns the thickness of the interval.\n\nReturns:\n Float: The thickness.", "id": "f7579:c1:m10"} {"signature": "def _blend_descriptions(self, other):", "body": "thin, thick = sorted([self, other], key=lambda k: k.thickness)total = thin.thickness + thick.thicknessprop = * thick.thickness / totalif self.components == other.components:return self.description.strip('')d1 = thick.description.strip('') or thick.summary()d2 = thin.description.strip('') or thin.summary()if d1:d = ''.format(prop, d1, -prop, d2)else:d = ''return d", "docstring": "Private method. Computes the description for combining two intervals.\nMake sure that the intervals are already adjusted to the correct\nthicknesses.\n\nArgs:\n other (Interval): The other Interval.\n\nReturns:\n str. The blended description.", "id": "f7579:c1:m24"} {"signature": "@propertydef primary(self):", "body": "if self.components:return self.components[]else:return None", "docstring": "Convenience function returning the first component.\n\nReturns:\n Component. The first one in the list of components.", "id": "f7579:c1:m9"} {"signature": "def __add__(self, other):", "body": "if isinstance(other, self.__class__):return self.union(other)elif isinstance(other, Component):top = self.top.zbase = self.base.zd = self.description + '' + other.summary()c = self.components + [other]data = self._combine_data(other)return Interval(top, base, description=d, data=data, components=c)else:m = \"\"raise IntervalError(m)", "docstring": "TODO:\n If adding components, should take account of 'amount', if present.\n Or 'proportion'? ...Could be specified by lexicon??", "id": "f7579:c1:m4"} {"signature": "def intersect(self, other, blend=True):", "body": "if not self.any_overlaps(other):m = ''raise IntervalError(m)_, intersection, _ = self._explode(other)return intersection._combine(self, other, blend=blend)", "docstring": "Perform the intersection binary operation. 
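The _blend_descriptions record above (f7579:c1:m24) weights the combined description by thickness; its format strings and the percentage multiplier are stripped in the dump, so the wording and the factor of 100 below are assumptions, but the thickness-weighted arithmetic mirrors the record.

def blend(d1, t1, d2, t2):
    # Sort so the thicker interval's description leads, as in the record.
    thin, thick = sorted([(t1, d1), (t2, d2)])
    total = thin[0] + thick[0]
    prop = 100 * thick[0] / total
    return "{:.0f}% {} with {:.0f}% {}".format(prop, thick[1], 100 - prop, thin[1])

print(blend("sandstone", 2.0, "shale", 6.0))   # "75% shale with 25% sandstone"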
self must at least\npartially overlap with other or an IntervalError is raised.\n\nIf blend is False, you are essentially replacing self with other.\n\nArgs:\n other (Interval): The other Interval.\n blend (bool): Whether to blend or not.\n\nReturns:\n Interval. The intersection of the Interval with the one provided.", "id": "f7579:c1:m26"} {"signature": "def union(self, other, blend=True):", "body": "if not (self.touches(other) or self.any_overlaps(other)):return self, otherif self.order == '':top = max(self.top.z, other.top.z)bot = min(self.base.z, other.base.z)else:top = min(self.top.z, other.top.z)bot = max(self.base.z, other.base.z)result = self.copy()result.top = topresult.base = botreturn result._combine(self, other, blend=blend)", "docstring": "Perform the union binary operation. self must at least touch other or\nan IntervalError is raised.\n\nIf blend is False, you are essentially replacing self with other.\n\nArgs:\n other (Interval): The other Interval.\n blend (bool): Whether to blend or not.\n\nReturns:\n Interval. The union of the Interval with the one provided.", "id": "f7579:c1:m28"} {"signature": "def invert(self, copy=False):", "body": "if copy:d = self.__dict__.copy()del(d[''])del(d[''])self.base.invert()self.top.invert()return Interval(top=self.base, base=self.top, **d)else:self.base.invert()self.top.invert()old_base = self.baseself.base = self.topself.top = old_basereturn", "docstring": "Inverts the interval. If it was depth-ordered (positive numbers\nincreasing downwards.), it will now be elevation-ordered, and\nvice versa.\n\nArgs:\n copy (bool): Whether to make a copy or not. Default: False.", "id": "f7579:c1:m16"} {"signature": "def hex_to_name(hexx):", "body": "for n, h in defaults.COLOURS.items():if (len(n) > ) and (h == hexx.upper()):return n.lower()return None", "docstring": "Convert hex to a color name, using matplotlib's colour names.\n\nArgs:\n hexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\n str: The name of the colour, or None if not found.", "id": "f7580:m7"} {"signature": "def flatten_list(l):", "body": "if (l == []) or (l is None):return lif isinstance(l[], list):return flatten_list(l[]) + flatten_list(l[:])return l[:] + flatten_list(l[:])", "docstring": "Unpacks lists in a list:\n\n [1, 2, [3, 4], [5, [6, 7]]]\n\nbecomes\n\n [1, 2, 3, 4, 5, 6, 7]\n\nhttp://stackoverflow.com/a/12472564/3381305", "id": "f7580:m16"} {"signature": "def tops_from_loglike(a, offset=, null=None):", "body": "a = np.copy(a)try:contains_nans = np.isnan(a).any()except:contains_nans = Falseif contains_nans:_null = null or -while _null in a:_null -= try:a[np.isnan(a)] = _nulltransformed = Trueexcept:transformed = Falseedges = a[:] == a[:-]edges = np.append(True, edges)tops = np.where(~edges)[]tops = np.append(, tops)values = a[tops + offset]if contains_nans and transformed:values[values == _null] = np.nanreturn tops, values", "docstring": "Take a log-like stream of numbers or strings, and return two arrays:\none of the tops (changes), and one of the values from the stream.\n\nArgs:\n loglike (array-like): The input stream of loglike data.\n offset (int): Offset (down) from top at which to get lithology,\n to be sure of getting 'clean' pixels.\n\nReturns:\n ndarray: Two arrays, tops and values.", "id": "f7580:m14"} {"signature": "def list_and_add(a, b):", "body": "if not isinstance(b, list):b = [b]if not isinstance(a, list):a = [a]return a + b", "docstring": "Coerce to lists and concatenate.\n\nArgs:\n a: A thing.\n b: A thing.\n\nReturns:\n List. 
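flatten_list above (f7580:m16) is complete apart from its stripped slice indices; reconstructed to match the example in its own docstring (the indices are therefore assumptions), it runs as follows.

def flatten_list(l):
    if (l == []) or (l is None):
        return l
    if isinstance(l[0], list):
        # Flatten the nested head, then recurse on the tail.
        return flatten_list(l[0]) + flatten_list(l[1:])
    return l[:1] + flatten_list(l[1:])

print(flatten_list([1, 2, [3, 4], [5, [6, 7]]]))   # [1, 2, 3, 4, 5, 6, 7]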
All the things.", "id": "f7580:m15"} {"signature": "def null_default(x):", "body": "def null(y):return xreturn null", "docstring": "Null function. Used for default in functions that can apply a user-\nsupplied function to data before returning.", "id": "f7580:m4"} {"signature": "def hex_to_rgb(hexx):", "body": "h = hexx.strip('')l = len(h)return tuple(int(h[i:i+l//], ) for i in range(, l, l//))", "docstring": "Utility function to convert hex to (r,g,b) triples.\nhttp://ageo.co/1CFxXpO\n\nArgs:\n hexx (str): A hexadecimal colour, starting with '#'.\n\nReturns:\n tuple: The equivalent RGB triple, in the range 0 to 255.", "id": "f7580:m10"} {"signature": "def axis_transform(ax, x, y, xlim=None, ylim=None, inverse=False):", "body": "xlim = xlim or ax.get_xlim()ylim = ylim or ax.get_ylim()xdelta = xlim[] - xlim[]ydelta = ylim[] - ylim[]if not inverse:xout = xlim[] + x * xdeltayout = ylim[] + y * ydeltaelse:xdelta2 = x - xlim[]ydelta2 = y - ylim[]xout = xdelta2 / xdeltayout = ydelta2 / ydeltareturn xout, yout", "docstring": "http://stackoverflow.com/questions/29107800\n\ninverse = False : Axis => Data\n = True : Data => Axis", "id": "f7580:m17"} {"signature": "def rgb_to_hex(rgb):", "body": "r, g, b = rgb[:]if (r < ) or (g < ) or (b < ):raise Exception(\"\")if (r > ) or (g > ) or (b > ):raise Exception(\"\")if ( < r < ) or ( < g < ) or ( < b < ):if (r > ) or (g > ) or (b > ):raise Exception(\"\")if ( <= r <= ) and ( <= g <= ) and ( <= b <= ):rgb = tuple([int(round(val * )) for val in [r, g, b]])else:rgb = (int(r), int(g), int(b))result = '' % rgbreturn result.lower()", "docstring": "Utility function to convert (r,g,b) triples to hex.\nhttp://ageo.co/1CFxXpO\nArgs:\n rgb (tuple): A sequence of RGB values in the\n range 0-255 or 0-1.\nReturns:\n str: The hex code for the colour.", "id": "f7580:m9"} {"signature": "def get_field(self, field_name, args, kwargs):", "body": "try:s = super(CustomFormatter, self)return s.get_field(field_name, args, kwargs)except KeyError: return (\"\", field_name)except IndexError: return (\"\", field_name)", "docstring": "Return an underscore if the attribute is absent.\nNot all components have the same attributes.", "id": "f7580:c0:m1"} {"signature": "def split_description(self, text):", "body": "t = re.sub(r'', r'', text) t = re.sub(r'', r'', t) words = getattr(self, '')try:splitter = words[].strip()except:splitter = ''t = re.sub(r'', r''+splitter+'', t)f = re.IGNORECASEpattern = re.compile(r'' + r''.join(words) + r'', flags=f)parts = filter(None, pattern.split(t))return [i.strip() for i in parts]", "docstring": "Split a description into parts, each of which can be turned into\na single component.", "id": "f7582:c1:m9"} {"signature": "def next(self):", "body": "return self.__next__()", "docstring": "For Python 2 compatibility.", "id": "f7583:c1:m10"} {"signature": "def crop(self, extent, copy=False):", "body": "try:if extent[] is None:extent = (self.start.z, extent[])if extent[] is None:extent = (extent[], self.stop.z)except:m = \"\"m += \"\"raise StriplogError(m)first_ix = self.read_at(extent[], index=True)last_ix = self.read_at(extent[], index=True)first = self[first_ix].split_at(extent[])[]last = self[last_ix].split_at(extent[])[]new_list = self.__list[first_ix:last_ix+].copy()new_list[] = firstnew_list[-] = lastif copy:return Striplog(new_list)else:self.__list = new_listreturn", "docstring": "Crop to a new depth range.\n\nArgs:\n extent (tuple): The new start and stop depth. 
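hex_to_rgb above (f7580:m10) also loses its literals in the dump; the '#' strip, the base-16 conversion and the step of l // 3 below are assumptions consistent with its docstring ("convert hex to (r,g,b) triples").

def hex_to_rgb(hexx):
    h = hexx.strip('#')
    l = len(h)
    # Split the hex string into three equal slices and parse each as base 16.
    return tuple(int(h[i:i + l // 3], 16) for i in range(0, l, l // 3))

print(hex_to_rgb('#CC33FF'))   # (204, 51, 255)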
Must be 'inside'\n existing striplog.\n copy (bool): Whether to operate in place or make a copy.\n\nReturns:\n Operates in place by deault; if copy is True, returns a striplog.", "id": "f7583:c1:m67"} {"signature": "def merge_overlaps(self):", "body": "overlaps = np.array(self.find_overlaps(index=True))if not overlaps.any():returnfor overlap in overlaps:before = self[overlap].copy()after = self[overlap + ].copy()del self[overlap]del self[overlap]new_segment = before.merge(after)self.__insert(overlap, new_segment)overlaps += return", "docstring": "Merges overlaps by merging overlapping Intervals.\n\nThe function takes no arguments and returns ``None``. It operates on\nthe striplog 'in place'\n\nTODO: This function will not work if any interval overlaps more than\n one other intervals at either its base or top.", "id": "f7583:c1:m60"} {"signature": "def __next__(self):", "body": "try:result = self.__list[self.__index]except IndexError:self.__index = raise StopIterationself.__index += return result", "docstring": "Supports iterable.", "id": "f7583:c1:m9"} {"signature": "@classmethoddef __intervals_from_tops(self,tops,values,basis,components,field=None,ignore_nan=True):", "body": "length = float(basis.size)start, stop = basis[], basis[-]tops = [start + (p/(length-)) * (stop-start) for p in tops]bases = tops[:] + [stop]list_of_Intervals = []for i, t in enumerate(tops):v, c, d = values[i], [], {}if ignore_nan and np.isnan(v):continueif (field is not None):d = {field: v}if components is not None:try:c = [deepcopy(components[int(v)])]except IndexError:c = []if c and (c[] is None):c = []interval = Interval(t, bases[i], data=d, components=c)list_of_Intervals.append(interval)return list_of_Intervals", "docstring": "Private method. Take a sequence of tops in an arbitrary dimension,\nand provide a list of intervals from which a striplog can be made.\n\nThis is only intended to be used by ``from_image()``.\n\nArgs:\n tops (iterable). A list of floats.\n values (iterable). A list of values to look up.\n basis (iterable). A list of components.\n components (iterable). A list of Components.\n\nReturns:\n List. A list of Intervals.", "id": "f7583:c1:m23"} {"signature": "@propertydef start(self):", "body": "if self.order == '':return self[].topelse:return self[-].base", "docstring": "Property. The closest Position to the datum.\n\nReturns:\n Position.", "id": "f7583:c1:m14"} {"signature": "@propertydef mean(self):", "body": "return self.cum / len(self)", "docstring": "Property. Returns the mean thickness of all filled intervals.\n\nReturns:\n Float. 
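The __next__ record above (f7583:c1:m9) resets its index and raises StopIteration when the list is exhausted; the stripped reset value is read as 0 here. The same protocol on a minimal stand-in class, with the Python 2 `next` alias from record f7583:c1:m10:

class Intervals(object):
    def __init__(self, items):
        self._items = list(items)
        self._index = 0

    def __iter__(self):
        return self

    def __next__(self):
        try:
            result = self._items[self._index]
        except IndexError:
            self._index = 0           # reset so the object can be iterated again
            raise StopIteration
        self._index += 1
        return result

    next = __next__                   # Python 2 spelling, as in the corpus record

print(list(Intervals([10, 20, 30])))   # [10, 20, 30]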
The mean average of interval thickness.", "id": "f7583:c1:m19"} {"signature": "def read_at(self, d, index=False):", "body": "for i, iv in enumerate(self):if iv.spans(d):return i if index else ivreturn None", "docstring": "Get the index of the interval at a particular 'depth' (though this\n might be an elevation or age or anything).\n\nArgs:\n d (Number): The 'depth' to query.\n index (bool): Whether to return the index instead of the interval.\n\nReturns:\n Interval: The interval, or if ``index==True`` the index of the\n interval, at the specified 'depth', or ``None`` if the depth is\n outside the striplog's range.", "id": "f7583:c1:m48"} {"signature": "def get_data(self, field, function=None, default=None):", "body": "f = function or utils.nulldata = []for iv in self:d = iv.data.get(field)if d is None:if default is not None:d = defaultelse:d = np.nandata.append(f(d))return np.array(data)", "docstring": "Get data from the striplog.", "id": "f7583:c1:m46"} {"signature": "def merge_neighbours(self, strict=True):", "body": "new_strip = [self[].copy()]for lower in self[:]:touching = new_strip[-].touches(lower)if strict:similar = new_strip[-].components == lower.componentselse:similar = new_strip[-].primary == lower.primaryif touching and similar:new_strip[-] = new_strip[-].union(lower)else:new_strip.append(lower.copy())return Striplog(new_strip)", "docstring": "Makes a new striplog in which matching neighbours (for which the\ncomponents are the same) are unioned. That is, they are replaced by\na new Interval with the same top as the uppermost and the same bottom\nas the lowermost.\n\nArgs\n strict (bool): If True, then all of the components must match.\n If False, then only the primary must match.\n\nReturns:\n Striplog. A new striplog.\n\nTODO:\n Might need to be tweaked to deal with 'binary striplogs' if those\n aren't implemented with components.", "id": "f7583:c1:m61"} {"signature": "@classmethoddef _from_array(cls, a,lexicon=None,source=\"\",points=False,abbreviations=False):", "body": "with warnings.catch_warnings():warnings.simplefilter(\"\")w = \"\"warnings.warn(w, DeprecationWarning, stacklevel=)csv_text = ''for interval in a:interval = [str(i) for i in interval]if (len(interval) < ) or (len(interval) > ):raise StriplogError('')descr = interval[-].strip('')interval[-] = '' + descr + ''csv_text += ''.join(interval) + ''return cls.from_descriptions(csv_text,lexicon,source=source,points=points,abbreviations=abbreviations)", "docstring": "DEPRECATING.\n\nTurn an array-like into a Striplog. It should have the following\nformat (where ``base`` is optional):\n\n [(top, base, description),\n (top, base, description),\n ...\n ]\n\nArgs:\n a (array-like): A list of lists or of tuples, or an array.\n lexicon (Lexicon): A language dictionary to extract structured\n objects from the descriptions.\n source (str): The source of the data. Default: ''.\n points (bool): Whether to treat as point data. 
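read_at above (f7583:c1:m48) scans the intervals for the one spanning a queried depth and returns either the interval or its index. A hedged stand-in that uses plain (top, base) tuples and an inclusive range test in place of Interval.spans():

def read_at(intervals, d, index=False):
    for i, (top, base) in enumerate(intervals):
        if top <= d <= base:
            return i if index else (top, base)
    return None

strip = [(0, 10), (10, 25), (25, 40)]
print(read_at(strip, 12))               # (10, 25)
print(read_at(strip, 12, index=True))   # 1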
Default: False.\n\nReturns:\n Striplog: The ``striplog`` object.", "id": "f7583:c1:m31"} {"signature": "def invert(self, copy=False):", "body": "if copy:return Striplog([i.invert(copy=True) for i in self])else:for i in self:i.invert()self.__sort()o = self.orderself.order = {'': '', '': ''}[o]return", "docstring": "Inverts the striplog, changing its order and the order of its contents.\n\nOperates in place by default.\n\nArgs:\n copy (bool): Whether to operate in place or make a copy.\n\nReturns:\n None if operating in-place, or an inverted copy of the striplog\n if not.", "id": "f7583:c1:m66"} {"signature": "def intersect(self, other):", "body": "if not isinstance(other, self.__class__):m = \"\"raise StriplogError(m)result = []for iv in self:for jv in other:try:result.append(iv.intersect(jv))except IntervalError:passreturn Striplog(result)", "docstring": "Makes a striplog of all intersections.\n\nArgs:\n Striplog. The striplog instance to intersect with.\n\nReturns:\n Striplog. The result of the intersection.", "id": "f7583:c1:m59"} {"signature": "def bar(self, height='', sort=False, reverse=False,legend=None, ax=None, figsize=None, **kwargs):", "body": "if sort:if sort is True:def func(x): return x.thicknessreverse = Truedata = sorted(self, key=func, reverse=reverse)else:data = self[:]if ax is None:fig, ax = plt.subplots(figsize=figsize)heights = [getattr(i, height) for i in data]comps = [i[] for i in self.unique]if legend is None:legend = Legend.random(comps)colors = [legend.get_colour(i.primary) for i in data]bars = ax.bar(range(len(data)), height=heights, color=colors, **kwargs)colourables = [i.primary.summary() for i in data]unique_bars = dict(zip(colourables, bars))ax.legend(unique_bars.values(), unique_bars.keys())ax.set_ylabel(height.title())return ax", "docstring": "Make a bar plot of thickness per interval.\n\nArgs:\n height (str): The property of the primary component to plot.\n sort (bool or function): Either pass a boolean indicating whether\n to reverse sort by thickness, or pass a function to be used as\n the sort key.\n reverse (bool): Reverses the sort order.\n legend (Legend): The legend to plot with.\n ax (axis): Optional axis to plot to.\n figsize (tuple): A figure size, (width, height), optional.\n **kwargs: passed to the matplotlib bar plot command, ax.bar().\n\nReturns:\n axis: If you sent an axis in, you get it back.", "id": "f7583:c1:m65"} {"signature": "def anneal(self):", "body": "strip = self.copy()gaps = strip.find_gaps(index=True)if not gaps:returnfor gap in gaps:before = strip[gap]after = strip[gap + ]if strip.order == '':t = (after.top.z-before.base.z)/before.base = before.base.z + tafter.top = after.top.z - telse:t = (after.base-before.top)/before.top = before.top.z + tafter.base = after.base.z - treturn strip", "docstring": "Fill in empty intervals by growing from top and base.\n\nNote that this operation happens in-place and destroys any information\nabout the ``Position`` (e.g. metadata associated with the top or base).\nSee GitHub issue #54.", "id": "f7583:c1:m56"} {"signature": "def __strict(self):", "body": "def conc(a, b):return a + bb = np.array(reduce(conc, [[i.top.z, i.base.z] for i in self]))return all(np.diff(b) >= )", "docstring": "Private method. Checks if striplog is monotonically increasing in\ndepth.\n\nReturns:\n Bool.", "id": "f7583:c1:m17"} {"signature": "def to_canstrat(self, filename, params):", "body": "return None", "docstring": "Write a Canstrat ASCII file.\n\nArgs:\n filename (str)\n params (dict): The well details. 
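The __strict record above (f7583:c1:m17) checks that tops and bases increase monotonically; the stripped comparison threshold is read as 0 here, and plain (top, base) tuples stand in for Interval objects.

from functools import reduce
import numpy as np

def is_strict(intervals):
    # Flatten each interval's top/base pair into one sequence, then require
    # every successive difference to be non-negative.
    b = np.array(reduce(lambda a, c: a + c, [[top, base] for top, base in intervals]))
    return bool(all(np.diff(b) >= 0))

print(is_strict([(0, 10), (10, 25), (25, 40)]))   # True
print(is_strict([(0, 10), (8, 25)]))              # False: overlap breaks monotonicity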
You can use a ``welly`` header\n object.\n\nReturns:", "id": "f7583:c1:m36"} {"signature": "def fill(self, component=None):", "body": "c = [component] if component is not None else []gaps = self.find_gaps()if not gaps:return selffor iv in gaps:iv.components = creturn deepcopy(self) + gaps", "docstring": "Fill gaps with the component provided.\n\nExample\n t = s.fill(Component({'lithology': 'cheese'}))", "id": "f7583:c1:m57"} {"signature": "def find(self, search_term, index=False):", "body": "hits = []for i, iv in enumerate(self):try:search_text = iv.description or iv.primary.summary()pattern = re.compile(search_term, flags=re.IGNORECASE)if pattern.search(search_text):hits.append(i)except TypeError:if search_term in iv.components:hits.append(i)if hits and index:return hitselif hits:return self[hits]else:return", "docstring": "Look for a regex expression in the descriptions of the striplog.\nIf there's no description, it looks in the summaries.\n\nIf you pass a Component, then it will search the components, not the\ndescriptions or summaries.\n\nCase insensitive.\n\nArgs:\n search_term (string or Component): The thing you want to search\n for. Strings are treated as regular expressions.\n index (bool): Whether to return the index instead of the interval.\nReturns:\n Striplog: A striplog that contains only the 'hit' Intervals.\n However, if ``index`` was ``True``, then that's what you get.", "id": "f7583:c1:m51"} {"signature": "def find_gaps(self, index=False):", "body": "return self.__find_incongruities(op=operator.lt, index=index)", "docstring": "Finds gaps in a striplog.\n\nArgs:\n index (bool): If True, returns indices of intervals with\n gaps after them.\n\nReturns:\n Striplog: A striplog of all the gaps. A sort of anti-striplog.", "id": "f7583:c1:m54"} {"signature": "def to_flag(self, **kwargs):", "body": "return self.to_log(**kwargs).astype(bool)", "docstring": "A wrapper for ``to_log()`` that returns a boolean array.\nUseful for masking. 
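The find record above (f7583:c1:m51) does a case-insensitive regex search over interval descriptions (falling back to summaries). The core of that search, shown on bare strings rather than Intervals:

import re

def find(descriptions, search_term):
    pattern = re.compile(search_term, flags=re.IGNORECASE)
    return [i for i, text in enumerate(descriptions) if pattern.search(text)]

descs = ["Grey SANDSTONE, fine grained", "Red shale", "Sandy siltstone"]
print(find(descs, r"sand"))   # [0, 2]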
Has the same interface as ``to_log()``.", "id": "f7583:c1:m40"} {"signature": "def _ensure_wrappability(fn):", "body": "if isinstance(fn, (type(object.__init__), type(object.__call__))):wrappable_fn = lambda *args, **kwargs: fn(*args, **kwargs)wrappable_fn.__name__ = fn.__name__wrappable_fn.__doc__ = fn.__doc__wrappable_fn.__module__ = '' wrappable_fn.__wrapped__ = fnreturn wrappable_fnreturn fn", "docstring": "Make sure `fn` can be wrapped cleanly by functools.wraps.", "id": "f7587:m1"} {"signature": "def _is_literally_representable(value):", "body": "return _format_value(value) is not None", "docstring": "Returns `True` if `value` can be (parseably) represented as a string.\n\n Args:\n value: The value to check.\n\n Returns:\n `True` when `value` can be represented as a string parseable by\n `parse_literal`, `False` otherwise.", "id": "f7587:m7"} {"signature": "def iterate_references(config, to=None):", "body": "for value in _iterate_flattened_values(config):if isinstance(value, ConfigurableReference):if to is None or value.configurable.fn_or_cls == to:yield value", "docstring": "Provides an iterator over references in the given config.\n\n Args:\n config: A dictionary mapping scoped configurable names to argument bindings.\n to: If supplied, only yield references whose `configurable_fn` matches `to`.\n\n Yields:\n `ConfigurableReference` instances within `config`, maybe restricted to those\n matching the `to` parameter if it is supplied.", "id": "f7587:m38"} {"signature": "def query_parameter(binding_key):", "body": "pbk = ParsedBindingKey(binding_key)if pbk.config_key not in _CONFIG:err_str = \"\"raise ValueError(err_str.format(pbk.given_selector))if pbk.arg_name not in _CONFIG[pbk.config_key]:err_str = \"\"raise ValueError(err_str.format(pbk.given_selector, pbk.arg_name))return _CONFIG[pbk.config_key][pbk.arg_name]", "docstring": "Returns the currently bound value to the specified `binding_key`.\n\n The `binding_key` argument should look like\n 'maybe/some/scope/maybe.moduels.configurable_name.parameter_name'. Note that\n this will not include default parameters.\n\n Args:\n binding_key: The parameter whose value should be set.\n\n Returns:\n The value bound to the configurable/parameter combination given in\n `binding_key`.\n\n Raises:\n ValueError: If no function can be found matching the configurable name\n specified by `biding_key`, or if the specified parameter name is\n blacklisted or not in the function's whitelist (if present) or if there is\n no value bound for the queried parameter or configurable.", "id": "f7587:m10"} {"signature": "def _format_value(value):", "body": "literal = repr(value)try:if parse_value(literal) == value:return literalexcept SyntaxError:passreturn None", "docstring": "Returns `value` in a format parseable by `parse_value`, or `None`.\n\n Simply put, This function ensures that when it returns a string value, the\n following will hold:\n\n parse_value(_format_value(value)) == value\n\n Args:\n value: The value to format.\n\n Returns:\n A string representation of `value` when `value` is literally representable,\n or `None`.", "id": "f7587:m6"} {"signature": "def __deepcopy__(self, memo):", "body": "addl_msg = ''_raise_unknown_reference_error(self, addl_msg)", "docstring": "Dishonestly implements the __deepcopy__ special method.\n\n See `ConfigurableReference` above. 
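_format_value above (f7587:m6) guarantees parse_value(_format_value(v)) == v by test-parsing the repr. A self-contained sketch with ast.literal_eval standing in for gin's parse_value (hence the extra ValueError handling, which the record does not need):

import ast

def format_value(value):
    literal = repr(value)
    try:
        # Only accept values whose repr round-trips back to an equal object.
        if ast.literal_eval(literal) == value:
            return literal
    except (SyntaxError, ValueError):
        pass
    return None

print(format_value([1, 'two', 3.0]))   # "[1, 'two', 3.0]"
print(format_value(object()))          # None: the repr is not parseable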
If this method is called, it means there\n was an attempt to use this unknown configurable reference, so we throw an\n error here.\n\n Args:\n memo: The memoization dict (unused).\n\n Raises:\n ValueError: To report that there is no matching configurable.", "id": "f7587:c2:m3"} {"signature": "def register_finalize_hook(fn):", "body": "_FINALIZE_HOOKS.append(fn)return fn", "docstring": "Registers `fn` as a hook that will run during `gin.finalize`.\n\n All finalize hooks should accept the current config, and return a dictionary\n containing any additional parameter bindings that should occur in the form of\n a mapping from (scoped) configurable names to values.\n\n Args:\n fn: The function to register.\n\n Returns:\n `fn`, allowing `register_finalize_hook` to be used as a decorator.", "id": "f7587:m36"} {"signature": "@configurable('', module='')def _retrieve_constant():", "body": "return _CONSTANTS[current_scope_str()]", "docstring": "Fetches and returns a constant from the _CONSTANTS map.", "id": "f7587:m41"} {"signature": "def after_create_session(self, session=None, coord=None):", "body": "config_str = config.operative_config_str()if not tf.gfile.IsDirectory(self._output_dir):tf.gfile.MakeDirs(self._output_dir)global_step_val = if session is not None:global_step = tf.train.get_global_step()if global_step is not None:global_step_val = session.run(global_step)filename = '' % (self._base_name, global_step_val)config_path = os.path.join(self._output_dir, filename)with tf.gfile.GFile(config_path, '') as f:f.write(config_str)if self._summarize_config:md_config_str = self._markdownify_operative_config_str(config_str)summary_metadata = summary_pb2.SummaryMetadata()summary_metadata.plugin_data.plugin_name = ''summary_metadata.plugin_data.content = b''text_tensor = tf.make_tensor_proto(md_config_str)summary = summary_pb2.Summary()summary.value.add(tag='' + self._base_name,tensor=text_tensor,metadata=summary_metadata)if not self._summary_writer:self._summary_writer = tf.summary.FileWriterCache.get(self._output_dir)self._summary_writer.add_summary(summary, global_step_val)self._summary_writer.flush()", "docstring": "Writes out Gin's operative config, and maybe adds a summary of it.", "id": "f7592:c0:m2"} {"signature": "def __init__(self,output_dir,base_name='',summarize_config=True,summary_writer=None):", "body": "self._output_dir = output_dirself._base_name = base_nameself._summarize_config = summarize_configself._summary_writer = summary_writer", "docstring": "Construct the GinConfigSaverHook.\n\n Args:\n output_dir: The directory in which to save the operative config. This\n should in general be the same as the directory in which summaries are\n stored (and thus may be different for train vs. eval jobs.\n base_name: The base name (name excluding path and extension) of the file\n where this hook will write the operative config. Also used as the\n summary tag if summarizing the config for display in TensorBoard.\n summarize_config: Whether to save a summary of the operative config that\n will be loaded by TensorBoard and displayed in its \"Text\" tab.\n summary_writer: A tf.summary.FileWriter object to use for writing\n summaries. 
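register_finalize_hook above (f7587:m36) is the usual registration-decorator shape: append the function to a module-level list and hand it back unchanged, so the decorated function stays directly callable. Restored and exercised:

_FINALIZE_HOOKS = []

def register_finalize_hook(fn):
    _FINALIZE_HOOKS.append(fn)
    return fn

@register_finalize_hook
def my_hook(config):
    # A finalize hook returns extra parameter bindings; none here.
    return {}

print(my_hook in _FINALIZE_HOOKS)   # True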
If `None` (default), a FileWriter object for `output_dir`\n will be created/retrieved by the tf.summary.FileWriterCache associated\n with `output_dir` in `after_create_session`.", "id": "f7592:c0:m0"} {"signature": "def __contains__(self, complete_selector):", "body": "return complete_selector in self._selector_map", "docstring": "Check if `complete_selector` is present.", "id": "f7593:c0:m8"} {"signature": "def __getitem__(self, complete_selector):", "body": "return self._selector_map[complete_selector]", "docstring": "Look up the value of `complete_selector` (no partial matching).", "id": "f7593:c0:m7"} {"signature": "def minimal_selector(self, complete_selector):", "body": "if complete_selector not in self._selector_map:raise KeyError(\"\".format(complete_selector))selector_components = complete_selector.split('')node = self._selector_treestart = Nonefor i, component in enumerate(reversed(selector_components)):if len(node) == :if start is None:start = -i else:start = Nonenode = node[component]if len(node) > : return complete_selectorreturn ''.join(selector_components[start:])", "docstring": "Returns the minimal selector that uniquely matches `complete_selector`.\n\n Args:\n complete_selector: A complete selector stored in the map.\n\n Returns:\n A partial selector that unambiguously matches `complete_selector`.\n\n Raises:\n KeyError: If `complete_selector` is not in the map.", "id": "f7593:c0:m13"} {"signature": "def _parse_selector(self, scoped=True, allow_periods_in_scope=False):", "body": "if self._current_token.kind != tokenize.NAME:self._raise_syntax_error('')begin_line_num = self._current_token.begin[]begin_char_num = self._current_token.begin[]end_char_num = self._current_token.end[]line = self._current_token.lineselector_parts = []step_parity = while (step_parity == and self._current_token.kind == tokenize.NAME orstep_parity == and self._current_token.value in ('', '')):selector_parts.append(self._current_token.value)step_parity = not step_parityend_char_num = self._current_token.end[]self._advance_one_token()self._skip_whitespace_and_comments()scoped_selector = ''.join(selector_parts)untokenized_scoped_selector = line[begin_char_num:end_char_num]scope_re = IDENTIFIER_REif allow_periods_in_scope:scope_re = MODULE_REselector_re = MODULE_REscope_parts = scoped_selector.split('')valid_format = all(scope_re.match(scope) for scope in scope_parts[:-])valid_format &= bool(selector_re.match(scope_parts[-]))valid_format &= bool(scoped or len(scope_parts) == )if untokenized_scoped_selector != scoped_selector or not valid_format:location = (self._filename, begin_line_num, begin_char_num + , line)self._raise_syntax_error('', location)return scoped_selector", "docstring": "Parse a (possibly scoped) selector.\n\n A selector is a sequence of one or more valid Python-style identifiers\n separated by periods (see also `SelectorMap`). 
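minimal_selector above (f7593:c0:m13) walks a dotted selector from the right until the suffix is unambiguous. The record does this with a selector tree; the sketch below gets the same answer from a flat list of complete selectors, so it is an approximation of the idea, not the recorded implementation.

def minimal_selector(complete, all_selectors):
    parts = complete.split('.')
    for n in range(1, len(parts) + 1):
        suffix = '.'.join(parts[-n:])
        matches = [s for s in all_selectors
                   if s == suffix or s.endswith('.' + suffix)]
        if matches == [complete]:
            return suffix
    return complete

names = ['configs.model.train', 'configs.data.train', 'configs.model.eval']
print(minimal_selector('configs.model.eval', names))    # 'eval'
print(minimal_selector('configs.model.train', names))   # 'model.train'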
A scoped selector is a\n selector that may be preceded by scope names (separated by slashes).\n\n Args:\n scoped: Whether scopes are allowed.\n allow_periods_in_scope: Whether to allow period characters in the scope\n names preceding the selector.\n\n Returns:\n The parsed selector (as a string).\n\n Raises:\n SyntaxError: If the scope or selector is malformatted.", "id": "f7595:c4:m14"} {"signature": "def __init__(self, string_or_filelike, parser_delegate):", "body": "if hasattr(string_or_filelike, ''):line_reader = string_or_filelike.readlineelse: if six.PY2:string_or_filelike = unicode(string_or_filelike)string_io = io.StringIO(string_or_filelike)line_reader = string_io.readlinedef _text_line_reader():line = line_reader()if isinstance(line, bytes):line = line.decode('')return lineself._token_generator = tokenize.generate_tokens(_text_line_reader)self._filename = getattr(string_or_filelike, '', None)self._current_token = Noneself._delegate = parser_delegateself._advance_one_token()", "docstring": "Construct the parser.\n\n Args:\n string_or_filelike: Either the string to parse, or a file-like object\n supporting the readline method.\n parser_delegate: An instance of the ParserDelegate class, that will be\n responsible for constructing appropriate objects for configurable\n references and macros.", "id": "f7595:c4:m0"} {"signature": "def advance_one_line(self):", "body": "current_line = self._current_token.line_numberwhile current_line == self._current_token.line_number:self._current_token = ConfigParser.Token(*next(self._token_generator))", "docstring": "Advances to next line.", "id": "f7595:c4:m8"} {"signature": "def _maybe_parse_macro(self):", "body": "if self._current_token.value != '':return False, Nonelocation = self._current_location()self._advance_one_token()scoped_name = self._parse_selector(allow_periods_in_scope=True)with utils.try_with_location(location):macro = self._delegate.macro(scoped_name)return True, macro", "docstring": "Try to parse an macro (%scope/name).", "id": "f7595:c4:m18"} {"signature": "def unsupported_versions_1979():", "body": "v = sys.version_infoif (v.major == ) and ((v.minor == and v.micro <= )or (v.minor == and v.micro == and v.releaselevel == '' and v.serial <= )):return Trueelse:return False", "docstring": "Unsupported python versions for itertags\n 3.7.0 - 3.7.2 and 3.8.0a1\n - https://github.com/streamlink/streamlink/issues/1979\n - https://bugs.python.org/issue34294", "id": "f7820:m0"} {"signature": "def __call__(self, values):", "body": "values = product(values, self._map)for value in map(self._mapped_func, filter(self._cmp_filter, values)):if isinstance(value, tuple) and len(value) == :yield valueelse:try:if isinstance(value, dict):for __ in value.items():yield __else:for __ in value:yield __except TypeError:continue", "docstring": "Runs through each value and transform it with a mapped function.", "id": "f7826:c0:m4"} {"signature": "def load_support_plugin(name):", "body": "stack = list(filter(lambda f: f[] == \"\", inspect.stack()))prev_frame = stack[]path = os.path.dirname(prev_frame[])if not os.path.isabs(path):prefix = os.path.normpath(__file__ + \"\")path = os.path.join(prefix, path)return load_module(name, path)", "docstring": "Loads a plugin from the same directory as the calling plugin.\n\n The path used is extracted from the last call in module scope,\n therefore this must be called only from module level in the\n originating plugin or the correct plugin path will not be found.", "id": "f7827:m0"} {"signature": "def parse_cookies(self, 
cookies, **kwargs):", "body": "for name, value in _parse_keyvalue_list(cookies):self.cookies.set(name, value, **kwargs)", "docstring": "Parses a semi-colon delimited list of cookies.\n\n Example: foo=bar;baz=qux", "id": "f7828:c1:m4"} {"signature": "def parse_query_params(self, cookies, **kwargs):", "body": "for name, value in _parse_keyvalue_list(cookies):self.params[name] = value", "docstring": "Parses a semi-colon delimited list of query parameters.\n\n Example: foo=bar;baz=qux", "id": "f7828:c1:m6"} {"signature": "@classmethoddef xml(cls, res, *args, **kwargs):", "body": "return parse_xml(res.text, *args, **kwargs)", "docstring": "Parses XML from a response.", "id": "f7828:c1:m3"} {"signature": "def xml_findtext(xpath):", "body": "return all(xml_find(xpath),getattr(\"\"),)", "docstring": "Find a XML element via xpath and extract its text.", "id": "f7830:m13"} {"signature": "def getattr(attr, default=None):", "body": "def getter(value):return _getattr(value, attr, default)return transform(getter)", "docstring": "Get a named attribute from an object.\n\n When a default argument is given, it is returned when the attribute\n doesn't exist.", "id": "f7830:m6"} {"signature": "def length(length):", "body": "def min_len(value):if not len(value) >= length:raise ValueError(\"\".format(length, len(value)))return Truereturn min_len", "docstring": "Checks value for minimum length using len().", "id": "f7830:m1"} {"signature": "def load_cookies(self):", "body": "if not self.session or not self.cache:raise RuntimeError(\"\")restored = []for key, value in self.cache.get_all().items():if key.startswith(\"\"):cookie = requests.cookies.create_cookie(**value)self.session.http.cookies.set_cookie(cookie)restored.append(cookie.name)if restored:self.logger.debug(\"\".format(\"\".join(restored)))return restored", "docstring": "Load any stored cookies for the plugin that have not expired.\n\n:return: list of the restored cookie names", "id": "f7834:c1:m16"} {"signature": "@classmethoddef priority(cls, url):", "body": "return NORMAL_PRIORITY", "docstring": "Return the plugin priority for a given URL, by default it returns\nNORMAL priority.\n:return: priority level", "id": "f7834:c1:m9"} {"signature": "def save_cookies(self, cookie_filter=None, default_expires= * * * ):", "body": "if not self.session or not self.cache:raise RuntimeError(\"\")cookie_filter = cookie_filter or (lambda c: True)saved = []for cookie in filter(cookie_filter, self.session.http.cookies):cookie_dict = {}for attr in (\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\"\", \"\", \"\"):cookie_dict[attr] = getattr(cookie, attr, None)cookie_dict[\"\"] = getattr(cookie, \"\", getattr(cookie, \"\", None))expires = default_expiresif cookie_dict['']:expires = int(cookie_dict[''] - time.time())key = \"\".format(cookie.name,cookie.domain,cookie.port_specified and cookie.port or \"\",cookie.path_specified and cookie.path or \"\")self.cache.set(key, cookie_dict, expires)saved.append(cookie.name)if saved:self.logger.debug(\"\".format(\"\".join(saved)))return saved", "docstring": "Store the cookies from ``http`` in the plugin cache until they expire. The cookies can be filtered\nby supplying a filter method. eg. ``lambda c: \"auth\" in c.name``. 
If no expiry date is given in the\ncookie then the ``default_expires`` value will be used.\n\n:param cookie_filter: a function to filter the cookies\n:type cookie_filter: function\n:param default_expires: time (in seconds) until cookies with no expiry will expire\n:type default_expires: int\n:return: list of the saved cookie names", "id": "f7834:c1:m15"} {"signature": "def __init__(self, name, required=False, requires=None, prompt=None, sensitive=False, argument_name=None,dest=None, **options):", "body": "self.required = requiredself.name = nameself.options = optionsself._argument_name = argument_name self._dest = dest self.requires = requires and (list(requires) if isinstance(requires, (list, tuple)) else [requires]) or []self.prompt = promptself.sensitive = sensitiveself._default = options.get(\"\")", "docstring": ":param name: name of the argument, without -- or plugin name prefixes, eg. ``\"password\"``, ``\"mux-subtitles\"``, etc.\n:param required (bool): if the argument is required for the plugin\n:param requires: list of the arguments which this argument requires, eg ``[\"password\"]``\n:param prompt: if the argument is required and not given, this prompt will show at run time\n:param sensitive (bool): if the argument is sensitive (passwords, etc) and should be masked in logs and if\n prompted use askpass\n:param argument_name:\n:param option_name:\n:param options: arguments passed to :func:`ArgumentParser.add_argument`, excluding requires, and dest", "id": "f7837:c1:m0"} {"signature": "def pkcs7_decode(paddedData, keySize=):", "body": "val = ord(paddedData[-:])if val > keySize:raise StreamError(\"\".format(val))return paddedData[:-val]", "docstring": "Remove the PKCS#7 padding", "id": "f7842:m1"} {"signature": "@classmethoddef parse_variant_playlist(cls, session_, url, name_key=\"\",name_prefix=\"\", check_streams=False,force_restart=False, name_fmt=None,start_offset=, duration=None,**request_params):", "body": "locale = session_.localizationname_key = request_params.pop(\"\", name_key)name_prefix = request_params.pop(\"\", name_prefix)audio_select = session_.options.get(\"\") or []res = session_.http.get(url, exception=IOError, **request_params)try:parser = hls_playlist.load(res.text, base_uri=res.url)except ValueError as err:raise IOError(\"\".format(err))streams = {}for playlist in filter(lambda p: not p.is_iframe, parser.playlists):names = dict(name=None, pixels=None, bitrate=None)audio_streams = []fallback_audio = []default_audio = []preferred_audio = []for media in playlist.media:if media.type == \"\" and media.name:names[\"\"] = media.nameelif media.type == \"\":audio_streams.append(media)for media in audio_streams:if not media.uri:continueif not fallback_audio and media.default:fallback_audio = [media]if not default_audio and (media.autoselect and locale.equivalent(language=media.language)):default_audio = [media]if (('' in audio_select or media.language in audio_select or media.name in audio_select) or((not preferred_audio or media.default) and locale.explicit and locale.equivalent(language=media.language))):preferred_audio.append(media)fallback_audio = fallback_audio or (len(audio_streams) andaudio_streams[].uri and [audio_streams[]])if playlist.stream_info.resolution:width, height = playlist.stream_info.resolutionnames[\"\"] = \"\".format(height)if playlist.stream_info.bandwidth:bw = playlist.stream_info.bandwidthif bw >= :names[\"\"] = \"\".format(int(bw / ))else:names[\"\"] = \"\".format(bw / )if name_fmt:stream_name = name_fmt.format(**names)else:stream_name = 
(names.get(name_key) or names.get(\"\") ornames.get(\"\") or names.get(\"\"))if not stream_name:continueif stream_name in streams: stream_name = \"\".format(stream_name)num_alts = len(list(filter(lambda n: n.startswith(stream_name), streams.keys())))if num_alts >= :continueelif num_alts > :stream_name = \"\".format(stream_name, num_alts + )if check_streams:try:session_.http.get(playlist.uri, **request_params)except KeyboardInterrupt:raiseexcept Exception:continueexternal_audio = preferred_audio or default_audio or fallback_audioif external_audio and FFMPEGMuxer.is_usable(session_):external_audio_msg = \"\".join([\"\".format(x.language, (x.name or \"\"))for x in external_audio])log.debug(\"\", name_prefix + stream_name,external_audio_msg)stream = MuxedHLSStream(session_,video=playlist.uri,audio=[x.uri for x in external_audio if x.uri],force_restart=force_restart,start_offset=start_offset,duration=duration,**request_params)else:stream = cls(session_,playlist.uri,force_restart=force_restart,start_offset=start_offset,duration=duration,**request_params)streams[name_prefix + stream_name] = streamreturn streams", "docstring": "Attempts to parse a variant playlist and return its streams.\n\n :param url: The URL of the variant playlist.\n :param name_key: Prefer to use this key as stream name, valid keys are:\n name, pixels, bitrate.\n :param name_prefix: Add this prefix to the stream names.\n :param check_streams: Only allow streams that are accessible.\n :param force_restart: Start at the first segment even for a live stream\n :param name_fmt: A format string for the name, allowed format keys are\n name, pixels, bitrate.", "id": "f7842:c4:m4"} {"signature": "def put(self, segment):", "body": "if self.closed:returnif segment is not None:future = self.executor.submit(self.fetch, segment,retries=self.retries)else:future = Noneself.queue(self.futures, (segment, future))", "docstring": "Adds a segment to the download pool and write queue.", "id": "f7850:c1:m2"} {"signature": "def __reduce__(self):", "body": "items = [[k, self[k]] for k in self]inst_dict = vars(self).copy()for k in vars(OrderedDict()):inst_dict.pop(k, None)if inst_dict:return (self.__class__, (items,), inst_dict)return self.__class__, (items,)", "docstring": "Return state information for pickling", "id": "f7864:c0:m17"} {"signature": "def __setitem__(self, key, value, dict_setitem=dict.__setitem__):", "body": "if key not in self:root = self.__rootlast = root[]last[] = root[] = self.__map[key] = [last, root, key]dict_setitem(self, key, value)", "docstring": "od.__setitem__(i, y) <==> od[i]=y", "id": "f7864:c0:m1"} {"signature": "def viewitems(self):", "body": "return ItemsView(self)", "docstring": "od.viewitems() -> a set-like object providing a view on od's items", "id": "f7864:c0:m24"} {"signature": "def pop(self, key, default=__marker):", "body": "if key in self:result = self[key]del self[key]return resultif default is self.__marker:raise KeyError(key)return default", "docstring": "od.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.", "id": "f7864:c0:m14"} {"signature": "@classmethoddef fromkeys(cls, iterable, value=None):", "body": "d = cls()for key in iterable:d[key] = valuereturn d", "docstring": "OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S\n and values equal to v (which defaults to None).", "id": "f7864:c0:m19"} {"signature": "def send(self, request, **kwargs):", "body": "if request.method not in (\"\", 
\"\"):raise ValueError(\"\" % request.method)url_parts = urlparse(request.url)if is_win32 and url_parts.netloc.endswith(\"\"):url_parts = url_parts._replace(path=\"\" + url_parts.netloc + url_parts.path, netloc='')if url_parts.netloc and url_parts.netloc not in (\"\", \"\", \"\", \"\"):raise ValueError(\"\")if url_parts.netloc in (\"\", \"\"):pwd = os.path.abspath(url_parts.netloc).replace(os.sep, \"\") + \"\"if is_win32:pwd = \"\" + pwdurl_parts = url_parts._replace(path=urljoin(pwd, url_parts.path.lstrip(\"\")))resp = Response()resp.url = request.urltry:if url_parts.netloc == \"\":if is_py3:resp.raw = sys.stdin.bufferelse:resp.raw = sys.stdinresp.url = \"\" + os.path.abspath(\"\").replace(os.sep, \"\") + \"\"else:path_parts = [unquote(p) for p in url_parts.path.split('')]while path_parts and not path_parts[]:path_parts.pop()if any(os.sep in p for p in path_parts):raise IOError(errno.ENOENT, os.strerror(errno.ENOENT))if path_parts and (path_parts[].endswith('') orpath_parts[].endswith('')):path_drive = path_parts.pop()if path_drive.endswith(''):path_drive = path_drive[:-] + ''while path_parts and not path_parts[]:path_parts.pop()else:path_drive = ''path = path_drive + os.sep + os.path.join(*path_parts)if path_drive and not os.path.splitdrive(path):path = os.sep + os.path.join(path_drive, *path_parts)resp.raw = io.open(path, \"\")resp.raw.release_conn = resp.raw.closeexcept IOError as e:if e.errno == errno.EACCES:resp.status_code = codes.forbiddenelif e.errno == errno.ENOENT:resp.status_code = codes.not_foundelse:resp.status_code = codes.bad_requestresp_str = str(e).encode(locale.getpreferredencoding(False))resp.raw = BytesIO(resp_str)resp.headers[''] = len(resp_str)resp.raw.release_conn = resp.raw.closeelse:resp.status_code = codes.okresp_stat = os.fstat(resp.raw.fileno())if stat.S_ISREG(resp_stat.st_mode):resp.headers[''] = resp_stat.st_sizereturn resp", "docstring": "Wraps a file, described in request, in a Response object.\n\n :param request: The PreparedRequest` being \"sent\".\n :returns: a Response object containing the file", "id": "f7867:c0:m0"} {"signature": "def get_versions():", "body": "cfg = get_config()verbose = cfg.verbosetry:return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,verbose)except NotThisMethod:passtry:root = os.path.realpath(__file__)for i in cfg.versionfile_source.split(''):root = os.path.dirname(root)except NameError:return {\"\": \"\", \"\": None,\"\": None,\"\": \"\",\"\": None}try:pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)return render(pieces, cfg.style)except NotThisMethod:passtry:if cfg.parentdir_prefix:return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)except NotThisMethod:passreturn {\"\": \"\", \"\": None,\"\": None,\"\": \"\", \"\": None}", "docstring": "Get version information or return default if unable to do so.", "id": "f7868:m16"} {"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "rootdirs = []for i in range():dirname = os.path.basename(root)if dirname.startswith(parentdir_prefix):return {\"\": dirname[len(parentdir_prefix):],\"\": None,\"\": False, \"\": None, \"\": None}else:rootdirs.append(root)root = os.path.dirname(root) if verbose:print(\"\" %(str(rootdirs), parentdir_prefix))raise NotThisMethod(\"\")", "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. 
We will also support searching up\n two directory levels for an appropriately named parent directory", "id": "f7868:m4"} {"signature": "def render_pep440_post(pieces):", "body": "if pieces[\"\"]:rendered = pieces[\"\"]if pieces[\"\"] or pieces[\"\"]:rendered += \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += plus_or_dot(pieces)rendered += \"\" % pieces[\"\"]else:rendered = \"\" % pieces[\"\"]if pieces[\"\"]:rendered += \"\"rendered += \"\" % pieces[\"\"]return rendered", "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]", "id": "f7868:m11"} {"signature": "def plus_or_dot(pieces):", "body": "if \"\" in pieces.get(\"\", \"\"):return \"\"return \"\"", "docstring": "Return a + if we don't already have one, else return a .", "id": "f7868:m8"} {"signature": "def register_vcs_handler(vcs, method): ", "body": "def decorate(f):\"\"\"\"\"\"if vcs not in HANDLERS:HANDLERS[vcs] = {}HANDLERS[vcs][method] = freturn freturn decorate", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f7868:m2"} {"signature": "def vod_data(self, vid=None):", "body": "page = self.session.http.get(self.url)m = self._vod_re.search(page.text)vod_data_url = m and urljoin(self.url, m.group())if vod_data_url:self.logger.debug(\"\", vod_data_url)res = self.session.http.get(vod_data_url)return self.session.http.json(res)", "docstring": "Get the VOD data path and the default VOD ID\n:return:", "id": "f7878:c0:m6"} {"signature": "def get_stream_id(self, html):", "body": "stream_id = stream_id_pattern.search(html)if not stream_id:self.logger.error(\"\")return stream_id.group(\"\")", "docstring": "Returns the stream_id contained in the HTML.", "id": "f7905:c0:m1"} {"signature": "def _get_streams(self):", "body": "match = self._url_re.match(self.url).groupdict()if match.get(\"\") == \"\":return self._get_live_streams(match)else:return self._get_vod_stream()", "docstring": "Find the streams for euronews\n:return:", "id": "f7923:c0:m3"} {"signature": "def get_info(self, media_id, fields=None, schema=None):", "body": "params = {\"\": media_id}if fields:params[\"\"] = \"\".join(fields)return self._api_call(\"\", params, schema=schema)", "docstring": "Returns the data for a certain media item.\n\n:param media_id: id that identifies the media item to be accessed.\n:param fields: list of the media\"s field to be returned. By default the\nAPI returns some fields, but others are not returned unless they are\nexplicity asked for. I have no real documentation on the fields, but\nthey all seem to start with the \"media.\" prefix (e.g. 
media.name,\nmedia.stream_data).\n:param schema: validation schema to use", "id": "f7928:c1:m6"} {"signature": "@classmethoddef priority(cls, url):", "body": "m = cls._url_re.match(url)if m:prefix, url = cls._url_re.match(url).groups()url_path = urlparse(url).pathif prefix is None and url_path.endswith(\"\"):return LOW_PRIORITYelif prefix is not None:return NORMAL_PRIORITYreturn NO_PRIORITY", "docstring": "Returns LOW priority if the URL is not prefixed with hls:// but ends with\n.m3u8 and return NORMAL priority if the URL is prefixed.\n:param url: the URL to find the plugin priority for\n:return: plugin priority for the given URL", "id": "f7935:c0:m0"} {"signature": "def _get_streams(self):", "body": "self.login()res = self.session.http.get(self.url)matches = self.config_re.finditer(res.text)try:config = self.config_schema.validate(dict([m.group(\"\", \"\") for m in matches]))except PluginError:returnif config[\"\"]:self.logger.debug(\"\", config[\"\"])api_url = urljoin(self.url, urljoin(config[\"\"], config[\"\"]))elif config[\"\"]:self.logger.debug(\"\", config[\"\"])api_url = urljoin(self.url, config[\"\"])else:returnares = self.session.http.get(api_url)data = self.session.http.json(ares, schema=self.api_schema)viewing_urls = data[\"\"]if \"\" in viewing_urls:self.logger.error(\"\", viewing_urls[\"\"])else:for url in viewing_urls[\"\"]:try:label = \"\".format(url.get(\"\", url[\"\"]))except KeyError:label = \"\"if url[\"\"] == \"\" and RTMPStream.is_usable(self.session):params = {\"\": url[\"\"],\"\": self.url,\"\": True,}yield label, RTMPStream(self.session, params)elif url[\"\"] == \"\":for s in HLSStream.parse_variant_playlist(self.session, url[\"\"]).items():yield s", "docstring": "Get the config object from the page source and call the\nAPI to get the list of streams\n:return:", "id": "f7969:c0:m2"} {"signature": "def login(self):", "body": "email = self.get_option(\"\")password = self.get_option(\"\")if email and password:res = self.session.http.get(self.login_url)csrf_match = self.csrf_re.search(res.text)token = csrf_match and csrf_match.group()self.logger.debug(\"\", email, token)res = self.session.http.post(self.login_url,data=dict(login=email, password=password, csrfmiddlewaretoken=token),allow_redirects=False,raise_for_status=False,headers={\"\": self.login_url})if res.status_code != :self.logger.error(\"\", email)", "docstring": "Attempt a login to LiveEdu.tv", "id": "f7969:c0:m1"} {"signature": "def login(self, ptrt_url):", "body": "def auth_check(res):return ptrt_url in ([h.url for h in res.history] + [res.url])session_res = self.session.http.get(self.session_url,params=dict(ptrt=ptrt_url))if auth_check(session_res):log.debug(\"\")return Truehttp_nonce = self._extract_nonce(session_res)res = self.session.http.post(self.auth_url,params=dict(ptrt=ptrt_url,nonce=http_nonce),data=dict(jsEnabled=True,username=self.get_option(\"\"),password=self.get_option(''),attempts=),headers={\"\": self.url})return auth_check(res)", "docstring": "Create session using BBC ID. 
See https://www.bbc.co.uk/usingthebbc/account/\n\n:param ptrt_url: The snapback URL to redirect to after successful authentication\n:type ptrt_url: string\n:return: Whether authentication was successful\n:rtype: bool", "id": "f7995:c0:m6"} {"signature": "@propertydef device_id(self):", "body": "if self._device_id is None:self._device_id = \"\".join(random.choice(\"\") for _ in range())return self._device_id", "docstring": "Randomly generated deviceId.\n:return:", "id": "f8027:c0:m2"} {"signature": "def hours_minutes_seconds(value):", "body": "try:return int(value)except ValueError:passmatch = (_hours_minutes_seconds_re.match(value)or _hours_minutes_seconds_2_re.match(value))if not match:raise ValueErrors = s += int(match.group(\"\") or \"\") * * s += int(match.group(\"\") or \"\") * s += int(match.group(\"\") or \"\")return s", "docstring": "converts a timestamp to seconds\n\n - hours:minutes:seconds to seconds\n - minutes:seconds to seconds\n - 11h22m33s to seconds\n - 11h to seconds\n - 20h15m to seconds\n - seconds to seconds\n\n :param value: hh:mm:ss ; 00h00m00s ; seconds\n :return: seconds", "id": "f8052:m0"} {"signature": "def prepend_www(url):", "body": "parsed = urlparse(url)if parsed.netloc.split(\"\")[] != \"\":return parsed.scheme + \"\" + parsed.netloc + parsed.pathelse:return url", "docstring": "Changes google.com to www.google.com", "id": "f8056:m3"} {"signature": "def parse_qsd(data, name=\"\", exception=PluginError, schema=None, **params):", "body": "value = dict(parse_qsl(data, **params))if schema:value = schema.validate(value, name=name, exception=exception)return value", "docstring": "Parses a query string into a dict.\n\n Unlike parse_qs and parse_qsl, duplicate keys are not preserved in\n favor of a simpler return value.", "id": "f8056:m6"} {"signature": "def url_equal(first, second, ignore_scheme=False, ignore_netloc=False, ignore_path=False, ignore_params=False,ignore_query=False, ignore_fragment=False):", "body": "firstp = urlparse(first)secondp = urlparse(second)return ((firstp.scheme == secondp.scheme or ignore_scheme) and(firstp.netloc == secondp.netloc or ignore_netloc) and(firstp.path == secondp.path or ignore_path) and(firstp.params == secondp.params or ignore_params) and(firstp.query == secondp.query or ignore_query) and(firstp.fragment == secondp.fragment or ignore_fragment))", "docstring": "Compare two URLs and return True if they are equal, some parts of the URLs can be ignored\n:param first: URL\n:param second: URL\n:param ignore_scheme: ignore the scheme\n:param ignore_netloc: ignore the netloc\n:param ignore_path: ignore the path\n:param ignore_params: ignore the params\n:param ignore_query: ignore the query string\n:param ignore_fragment: ignore the fragment\n:return: result of comparison", "id": "f8059:m1"} {"signature": "def update_qsd(url, qsd=None, remove=None):", "body": "qsd = qsd or {}remove = remove or []parsed = urlparse(url)current_qsd = OrderedDict(parse_qsl(parsed.query))if remove == \"\":remove = list(current_qsd.keys())for key in remove:if key not in qsd:del current_qsd[key]for key, value in qsd.items():if value:current_qsd[key] = valuereturn parsed._replace(query=urlencode(current_qsd)).geturl()", "docstring": "Update or remove keys from a query string in a URL\n\n:param url: URL to update\n:param qsd: dict of keys to update, a None value leaves it unchanged\n:param remove: list of keys to remove, or \"*\" to remove all\n note: updated keys are never removed, even if unchanged\n:return: updated URL", "id": "f8059:m3"} {"signature": "def 
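The `hours_minutes_seconds` record above (id f8052:m0) converts timestamps such as `1:02:03`, `02:03`, `11h22m33s`, `20h15m` or a plain number of seconds into seconds. Its `_hours_minutes_seconds_re` patterns are elided, so the regexes below are hypothetical stand-ins that reproduce the documented behaviour:

```python
import re

# Hypothetical patterns; the original _hours_minutes_seconds_re expressions are elided above.
_HMS_COLON = re.compile(r"^(?:(?P<hours>\d+):)?(?P<minutes>\d+):(?P<seconds>\d+)$")
_HMS_SUFFIX = re.compile(r"^(?:(?P<hours>\d+)h)?(?:(?P<minutes>\d+)m)?(?:(?P<seconds>\d+)s)?$")

def hours_minutes_seconds(value):
    """Convert 'hh:mm:ss', 'mm:ss', '11h22m33s', '20h15m' or plain seconds to seconds."""
    try:
        return int(value)                      # already a plain number of seconds
    except ValueError:
        pass
    match = _HMS_COLON.match(value) or _HMS_SUFFIX.match(value)
    if not match or not match.group(0):
        raise ValueError("unrecognised timestamp: %r" % (value,))
    total = int(match.group("hours") or "0") * 60 * 60
    total += int(match.group("minutes") or "0") * 60
    total += int(match.group("seconds") or "0")
    return total

assert hours_minutes_seconds("1:02:03") == 3723
assert hours_minutes_seconds("11h22m33s") == 40953
assert hours_minutes_seconds("90") == 90
```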
set_loglevel(self, level):", "body": "self.logger.set_level(level)", "docstring": "Sets the log level used by this session.\n\n Valid levels are: \"none\", \"error\", \"warning\", \"info\"\n and \"debug\".\n\n :param level: level of logging to output", "id": "f8063:c0:m6"} {"signature": "def makeRecord(self, name, level, fn, lno, msg, args, exc_info,func=None, extra=None, sinfo=None):", "body": "if name.startswith(\"\"):rv = _LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)else:rv = _CompatLogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)if extra is not None:for key in extra:if (key in [\"\", \"\"]) or (key in rv.__dict__):raise KeyError(\"\" % key)rv.__dict__[key] = extra[key]return rv", "docstring": "A factory method which can be overridden in subclasses to create\nspecialized LogRecords.", "id": "f8064:c2:m2"} {"signature": "def iter_http_requests(server, player):", "body": "while not player or player.running:try:yield server.open(timeout=)except OSError:continue", "docstring": "Repeatedly accept HTTP connections on a server.\n\n Forever if the serving externally, or while a player is running if it is not\n empty.", "id": "f8069:m4"} {"signature": "def setup_plugins(extra_plugin_dir=None):", "body": "if os.path.isdir(PLUGINS_DIR):load_plugins([PLUGINS_DIR])if extra_plugin_dir:load_plugins(extra_plugin_dir)", "docstring": "Loads any additional plugins.", "id": "f8069:m23"} {"signature": "def fetch_streams_with_retry(plugin, interval, count):", "body": "try:streams = fetch_streams(plugin)except PluginError as err:log.error(u\"\", err)streams = Noneif not streams:log.info(\"\"\"\", interval)attempts = while not streams:sleep(interval)try:streams = fetch_streams(plugin)except FatalPluginError as err:raiseexcept PluginError as err:log.error(u\"\", err)if count > :attempts += if attempts >= count:breakreturn streams", "docstring": "Attempts to fetch streams repeatedly\n until some are returned or limit hit.", "id": "f8069:m12"} {"signature": "def output_stream_passthrough(plugin, stream):", "body": "global outputtitle = create_title(plugin)filename = ''.format(stream_to_url(stream))output = PlayerOutput(args.player, args=args.player_args,filename=filename, call=True,quiet=not args.verbose_player,title=title)try:log.info(\"\", args.player)output.open()except OSError as err:console.exit(\"\", args.player, err)return Falsereturn True", "docstring": "Prepares a filename to be passed to the player.", "id": "f8069:m6"} {"signature": "def log_current_versions():", "body": "if logger.root.isEnabledFor(logging.DEBUG):if sys.platform == \"\":os_version = \"\".format(platform.mac_ver()[])elif sys.platform.startswith(\"\"):os_version = \"\".format(platform.system(), platform.release())else:os_version = platform.platform()log.debug(\"\".format(os_version))log.debug(\"\".format(platform.python_version()))log.debug(\"\".format(streamlink_version))log.debug(\"\".format(requests.__version__, socks_version, websocket_version))", "docstring": "Show current installed versions", "id": "f8069:m29"} {"signature": "def setup_args(parser, config_files=[], ignore_unknown=False):", "body": "global argsarglist = sys.argv[:]for config_file in filter(os.path.isfile, config_files):arglist.insert(, \"\" + config_file)args, unknown = parser.parse_known_args(arglist)if unknown and not ignore_unknown:msg = gettext('')parser.error(msg % ''.join(unknown))if args.stream:args.stream = [stream.lower() for stream in args.stream]if not args.url and args.url_param:args.url = args.url_param", 
"docstring": "Parses arguments.", "id": "f8069:m19"} {"signature": "def check_file_output(filename, force):", "body": "log.debug(\"\")if os.path.isfile(filename) and not force:if sys.stdin.isatty():answer = console.ask(\"\",filename)if answer.lower() != \"\":sys.exit()else:log.error(\"\".format(filename))sys.exit()return FileOutput(filename)", "docstring": "Checks if file already exists and ask the user if it should\n be overwritten if it does.", "id": "f8069:m0"} {"signature": "def output_stream_http(plugin, initial_streams, external=False, port=):", "body": "global outputif not external:if not args.player:console.exit(\"\"\"\"\"\")title = create_title(plugin)server = create_http_server()player = output = PlayerOutput(args.player, args=args.player_args,filename=server.url,quiet=not args.verbose_player,title=title)try:log.info(\"\", args.player)if player:player.open()except OSError as err:console.exit(\"\",args.player, err)else:server = create_http_server(host=None, port=port)player = Nonelog.info(\"\")for url in server.urls:log.info(\"\" + url)for req in iter_http_requests(server, player):user_agent = req.headers.get(\"\") or \"\"log.info(\"\".format(user_agent))stream_fd = prebuffer = Nonewhile not stream_fd and (not player or player.running):try:streams = initial_streams or fetch_streams(plugin)initial_streams = Nonefor stream_name in (resolve_stream_name(streams, s) for s in args.stream):if stream_name in streams:stream = streams[stream_name]breakelse:log.info(\"\"\"\")sleep()continueexcept PluginError as err:log.error(u\"\", err)continuetry:log.info(\"\", stream_name,type(stream).shortname())stream_fd, prebuffer = open_stream(stream)except StreamError as err:log.error(\"\", err)if stream_fd and prebuffer:log.debug(\"\")read_stream(stream_fd, server, prebuffer)server.close(True)player.close()server.close()", "docstring": "Continuously output the stream over HTTP.", "id": "f8069:m5"} {"signature": "def authenticate_twitch_oauth():", "body": "client_id = TWITCH_CLIENT_IDredirect_uri = \"\"url = (\"\"\"\"\"\"\"\"\"\"\"\").format(client_id, redirect_uri)console.msg(\"\"\"\")try:if not webbrowser.open_new_tab(url):raise webbrowser.Errorexcept webbrowser.Error:console.exit(\"\"\"\".format(url))", "docstring": "Opens a web browser to allow the user to grant Streamlink\n access to their Twitch account.", "id": "f8069:m17"} {"signature": "def handle_url():", "body": "try:plugin = streamlink.resolve_url(args.url)setup_plugin_options(streamlink, plugin)log.info(\"\",plugin.module, args.url)plugin_args = []for parg in plugin.arguments:value = plugin.get_option(parg.dest)if value:plugin_args.append((parg, value))if plugin_args:log.debug(\"\")for parg, value in plugin_args:log.debug(\"\".format(parg.argument_name(plugin.module),value if not parg.sensitive else (\"\" * ),parg.dest))if args.retry_max or args.retry_streams:retry_streams = retry_max = if args.retry_streams:retry_streams = args.retry_streamsif args.retry_max:retry_max = args.retry_maxstreams = fetch_streams_with_retry(plugin, retry_streams,retry_max)else:streams = fetch_streams(plugin)except NoPluginError:console.exit(\"\", args.url)except PluginError as err:console.exit(u\"\", err)if not streams:console.exit(\"\", args.url)if args.default_stream and not args.stream and not args.json:args.stream = args.default_streamif args.stream:validstreams = format_valid_streams(plugin, streams)for stream_name in args.stream:if stream_name in streams:log.info(\"\", validstreams)handle_stream(plugin, streams, stream_name)returnerr = 
(\"\"\"\".format(\"\".join(args.stream)))if console.json:console.msg_json(dict(streams=streams, plugin=plugin.module,error=err))else:console.exit(\"\",err, validstreams)else:if console.json:console.msg_json(dict(streams=streams, plugin=plugin.module))else:validstreams = format_valid_streams(plugin, streams)console.msg(\"\", validstreams)", "docstring": "The URL handler.\n\n Attempts to resolve the URL to a plugin and then attempts\n to fetch a list of available streams.\n\n Proceeds to handle stream if user specified a valid one,\n otherwise output list of valid streams.", "id": "f8069:m15"} {"signature": "def terminal_width(value):", "body": "if isinstance(value, bytes):value = value.decode(\"\", \"\")return sum(map(get_width, map(ord, value)))", "docstring": "Returns the width of the string it would be when displayed.", "id": "f8072:m1"} {"signature": "def create_status_line(**params):", "body": "max_size = get_terminal_size().columns - for fmt in PROGRESS_FORMATS:status = fmt.format(**params)if len(status) <= max_size:breakreturn status", "docstring": "Creates a status line with appropriate size.", "id": "f8072:m6"} {"signature": "def versions_from_parentdir(parentdir_prefix, root, verbose):", "body": "rootdirs = []for i in range():dirname = os.path.basename(root)if dirname.startswith(parentdir_prefix):return {\"\": dirname[len(parentdir_prefix):],\"\": None,\"\": False, \"\": None, \"\": None}else:rootdirs.append(root)root = os.path.dirname(root) if verbose:print(\"\" %(str(rootdirs), parentdir_prefix))raise NotThisMethod(\"\")", "docstring": "Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory", "id": "f8078:m8"} {"signature": "def get_cmdclass():", "body": "if \"\" in sys.modules:del sys.modules[\"\"]cmds = {}from distutils.core import Commandclass cmd_version(Command):description = \"\"user_options = []boolean_options = []def initialize_options(self):passdef finalize_options(self):passdef run(self):vers = get_versions(verbose=True)print(\"\" % vers[\"\"])print(\"\" % vers.get(\"\"))print(\"\" % vers.get(\"\"))print(\"\" % vers.get(\"\"))if vers[\"\"]:print(\"\" % vers[\"\"])cmds[\"\"] = cmd_versionif \"\" in sys.modules:from setuptools.command.build_py import build_py as _build_pyelse:from distutils.command.build_py import build_py as _build_pyclass cmd_build_py(_build_py):def run(self):root = get_root()cfg = get_config_from_root(root)versions = get_versions()_build_py.run(self)if cfg.versionfile_build:target_versionfile = os.path.join(self.build_lib,cfg.versionfile_build)print(\"\" % target_versionfile)write_to_version_file(target_versionfile, versions)cmds[\"\"] = cmd_build_pyif \"\" in sys.modules: from cx_Freeze.dist import build_exe as _build_execlass cmd_build_exe(_build_exe):def run(self):root = get_root()cfg = get_config_from_root(root)versions = get_versions()target_versionfile = cfg.versionfile_sourceprint(\"\" % target_versionfile)write_to_version_file(target_versionfile, versions)_build_exe.run(self)os.unlink(target_versionfile)with open(cfg.versionfile_source, \"\") as f:LONG = LONG_VERSION_PY[cfg.VCS]f.write(LONG %{\"\": \"\",\"\": cfg.style,\"\": cfg.tag_prefix,\"\": cfg.parentdir_prefix,\"\": cfg.versionfile_source,})cmds[\"\"] = cmd_build_exedel cmds[\"\"]if '' in sys.modules: try:from py2exe.distutils_buildexe import py2exe as _py2exe 
except ImportError:from py2exe.build_exe import py2exe as _py2exe class cmd_py2exe(_py2exe):def run(self):root = get_root()cfg = get_config_from_root(root)versions = get_versions()target_versionfile = cfg.versionfile_sourceprint(\"\" % target_versionfile)write_to_version_file(target_versionfile, versions)_py2exe.run(self)os.unlink(target_versionfile)with open(cfg.versionfile_source, \"\") as f:LONG = LONG_VERSION_PY[cfg.VCS]f.write(LONG %{\"\": \"\",\"\": cfg.style,\"\": cfg.tag_prefix,\"\": cfg.parentdir_prefix,\"\": cfg.versionfile_source,})cmds[\"\"] = cmd_py2exeif \"\" in sys.modules:from setuptools.command.sdist import sdist as _sdistelse:from distutils.command.sdist import sdist as _sdistclass cmd_sdist(_sdist):def run(self):versions = get_versions()self._versioneer_generated_versions = versionsself.distribution.metadata.version = versions[\"\"]return _sdist.run(self)def make_release_tree(self, base_dir, files):root = get_root()cfg = get_config_from_root(root)_sdist.make_release_tree(self, base_dir, files)target_versionfile = os.path.join(base_dir, cfg.versionfile_source)print(\"\" % target_versionfile)write_to_version_file(target_versionfile,self._versioneer_generated_versions)cmds[\"\"] = cmd_sdistreturn cmds", "docstring": "Get the custom setuptools/distutils subclasses used by Versioneer.", "id": "f8078:m21"} {"signature": "def get_versions(verbose=False):", "body": "if \"\" in sys.modules:del sys.modules[\"\"]root = get_root()cfg = get_config_from_root(root)assert cfg.VCS is not None, \"\"handlers = HANDLERS.get(cfg.VCS)assert handlers, \"\" % cfg.VCSverbose = verbose or cfg.verboseassert cfg.versionfile_source is not None,\"\"assert cfg.tag_prefix is not None, \"\"versionfile_abs = os.path.join(root, cfg.versionfile_source)get_keywords_f = handlers.get(\"\")from_keywords_f = handlers.get(\"\")if get_keywords_f and from_keywords_f:try:keywords = get_keywords_f(versionfile_abs)ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passtry:ver = versions_from_file(versionfile_abs)if verbose:print(\"\" % (versionfile_abs, ver))return verexcept NotThisMethod:passfrom_vcs_f = handlers.get(\"\")if from_vcs_f:try:pieces = from_vcs_f(cfg.tag_prefix, root, verbose)ver = render(pieces, cfg.style)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passtry:if cfg.parentdir_prefix:ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)if verbose:print(\"\" % ver)return verexcept NotThisMethod:passif verbose:print(\"\")return {\"\": \"\", \"\": None,\"\": None, \"\": \"\",\"\": None}", "docstring": "Get the project version from whatever source is available.\n\n Returns dict with two keys: 'version' and 'full'.", "id": "f8078:m19"} {"signature": "def register_vcs_handler(vcs, method): ", "body": "def decorate(f):\"\"\"\"\"\"if vcs not in HANDLERS:HANDLERS[vcs] = {}HANDLERS[vcs][method] = freturn freturn decorate", "docstring": "Decorator to mark a method as the handler for a particular VCS.", "id": "f8078:m2"} {"signature": "def write_to_version_file(filename, versions):", "body": "os.unlink(filename)contents = json.dumps(versions, sort_keys=True,indent=, separators=(\"\", \"\"))with open(filename, \"\") as f:f.write(SHORT_VERSION_PY % contents)print(\"\" % (filename, versions[\"\"]))", "docstring": "Write the given version number to the given _version.py file.", "id": "f8078:m10"} {"signature": "def do_vcs_install(manifest_in, versionfile_source, ipy):", "body": "GITS = [\"\"]if sys.platform == \"\":GITS = [\"\", 
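The `register_vcs_handler` record above (id f8078:m2) is a decorator factory that files functions into a nested `HANDLERS[vcs][method]` registry so the right VCS backend can be looked up later. A self-contained sketch of that pattern (the registered function body is an illustrative placeholder):

```python
HANDLERS = {}

def register_vcs_handler(vcs, method):
    """Decorator: store the decorated function under HANDLERS[vcs][method]."""
    def decorate(f):
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate

@register_vcs_handler("git", "keywords")
def git_get_keywords(versionfile_abs):
    ...  # placeholder; the real handler parses expanded keywords out of the version file

print(HANDLERS["git"]["keywords"] is git_get_keywords)   # True
```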
\"\"]files = [manifest_in, versionfile_source]if ipy:files.append(ipy)try:me = __file__if me.endswith(\"\") or me.endswith(\"\"):me = os.path.splitext(me)[] + \"\"versioneer_file = os.path.relpath(me)except NameError:versioneer_file = \"\"files.append(versioneer_file)present = Falsetry:f = open(\"\", \"\")for line in f.readlines():if line.strip().startswith(versionfile_source):if \"\" in line.strip().split()[:]:present = Truef.close()except EnvironmentError:passif not present:f = open(\"\", \"\")f.write(\"\" % versionfile_source)f.close()files.append(\"\")run_command(GITS, [\"\", \"\"] + files)", "docstring": "Git-specific installation logic for Versioneer.\n\n For Git, this means creating/changing .gitattributes to mark _version.py\n for export-subst keyword substitution.", "id": "f8078:m7"} {"signature": "@register_vcs_handler(\"\", \"\")def git_get_keywords(versionfile_abs):", "body": "keywords = {}try:f = open(versionfile_abs, \"\")for line in f.readlines():if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()if line.strip().startswith(\"\"):mo = re.search(r'', line)if mo:keywords[\"\"] = mo.group()f.close()except EnvironmentError:passreturn keywords", "docstring": "Extract version information from the given file.", "id": "f8078:m4"} {"signature": "async def _close_session(self) -> None:", "body": "await self.client_session.close()await asyncio.sleep()", "docstring": "According to the aiohttp documentation all opened sessions need to be closed, before leaving the program. This\nfunction takes care that the client session is closed. This async co-routine is automatically scheduled, when\nthe client object is destroyed.", "id": "f8085:c1:m2"} {"signature": "@staticmethoddef _transform_data(data: dict) -> dict:", "body": "for key in data.keys():return_value = data[key]if isinstance(return_value, dict):return return_valuereturn data", "docstring": "Each CloudStack API call returns a nested dictionary structure. The first level contains only one key indicating\nthe API that originated the response. This function removes that first level from the data returned to the\ncaller.\n\n:param data: Response of the API call\n:type data: dict\n:return: Simplified response without the information about the API that originated the response.\n:rtype: dict", "id": "f8085:c1:m7"} {"signature": "def __getattr__(self, command: str) -> Callable:", "body": "return partial(self.request, command=command)", "docstring": "This allows to support any available and future CloudStack API in this client. 
The returned partial function can\ndirectly be used to call the corresponding CloudStack API including all supported parameters.\n\n:param command: Command string indicating the CloudStack API to be called.\n:type command: str\n:return: Partial function that can be used the call the CloudStack API specified in the command string.", "id": "f8085:c1:m3"} {"signature": "async def _handle_response(self, response: aiohttp.client_reqrep.ClientResponse, await_final_result: bool) -> dict:", "body": "try:data = await response.json()except aiohttp.client_exceptions.ContentTypeError:text = await response.text()logging.debug(''.format(text))raise CloudStackClientException(message=\"\")else:data = self._transform_data(data)if response.status != :raise CloudStackClientException(message=\"\",error_code=data.get(\"\", response.status),error_text=data.get(\"\"),response=data)while await_final_result and ('' in data):await asyncio.sleep(self.async_poll_latency)data = await self.queryAsyncJobResult(jobid=data[''])if data['']: if not data['']: try:return data['']except KeyError:passlogging.debug(\"\".format(str(data)))raise CloudStackClientException(message=\"\",error_code=data.get(\"\"),error_text=data.get(\"\"),response=data)return data", "docstring": "Handles the response returned from the CloudStack API. Some CloudStack API are implemented asynchronous, which\nmeans that the API call returns just a job id. The actually expected API response is postponed and a specific\nasyncJobResults API has to be polled using the job id to get the final result once the API call has been\nprocessed.\n\n:param response: The response returned by the aiohttp call.\n:type response: aiohttp.client_reqrep.ClientResponse\n:param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API\n until the asynchronous API call has been processed\n:type await_final_result: bool\n:return: Dictionary containing the JSON response of the API call\n:rtype: dict", "id": "f8085:c1:m5"} {"signature": "def __init__(self,string: Union[str, MutableSequence[str]],header: bool = False,_type_to_spans: Dict[str, List[List[int]]] = None,_span: int = None,_type: int = None,_match: Match = None,_attrs_match: Match = None,) -> None:", "body": "super().__init__(string, _type_to_spans, _span, _type)self._header = headerif _match:string = self.stringself._match_cache = _match, stringif _attrs_match:self._attrs_match_cache = _attrs_match, stringelse:self._attrs_match_cache =ATTRS_MATCH(_match['']), stringelse:self._attrs_match_cache = self._match_cache = None, None", "docstring": "Initialize the object.", "id": "f8126:c0:m0"} {"signature": "@value.setterdef value(self, new_value: str) -> None:", "body": "m = self._matchoffset = m.start()s, e = m.span('')self[s - offset:e - offset] = new_value", "docstring": "Assign new_value to self.", "id": "f8126:c0:m3"} {"signature": "def set_attr(self, attr_name: str, attr_value: str) -> None:", "body": "cell_match = self._matchshadow = cell_match.stringattrs_start, attrs_end = cell_match.span('')if attrs_start != -:encoded_attr_name = attr_name.encode()attrs_m = ATTRS_MATCH(shadow, attrs_start, attrs_end)for i, n in enumerate(reversed(attrs_m.captures(''))):if n == encoded_attr_name:vs, ve = attrs_m.spans('')[-i - ]q = if attrs_m.string[ve] in b'' else self[vs - q:ve + q] = ''.format(attr_value)returnattr_end = cell_match.end('')fmt = '' if shadow[attr_end - ] == else ''self.insert(attr_end, fmt.format(attr_name, attr_value))returnfmt = '' if attr_value else ''if shadow[] == : 
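The `__getattr__` record above (id f8085:c1:m3) turns any attribute access on the client into a `functools.partial` around its generic `request` method, so every current and future CloudStack command works without being listed explicitly. A minimal sketch of that dispatch (class and command names are illustrative):

```python
from functools import partial

class APIClient:
    def request(self, command, **params):
        # A real client would build and send the signed HTTP request here.
        return {"command": command, "params": params}

    def __getattr__(self, command):
        # Only called for attributes that don't already exist, so request() still resolves normally.
        return partial(self.request, command=command)

client = APIClient()
print(client.listVirtualMachines(zoneid="z-1"))
# {'command': 'listVirtualMachines', 'params': {'zoneid': 'z-1'}}
```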
self.insert(cell_match.start('') + ,fmt.format(attr_name, attr_value))returnself.insert(, fmt.format(attr_name, attr_value))return", "docstring": "Set the value for the given attribute name.\n\n If there are already multiple attributes with that name, only\n set the value for the last one.\n If attr_value == '', use the implicit empty attribute syntax.", "id": "f8126:c0:m5"} {"signature": "@propertydef text(self) -> Optional[str]:", "body": "head, pipe, tail = self._atomic_partition()if pipe:return tail[:-]return None", "docstring": "Return the text of this WikiLink. Do not include linktrail.", "id": "f8127:c0:m2"} {"signature": "def get_arg(self, name: str) -> Optional[Argument]:", "body": "return get_arg(name, reversed(self.arguments))", "docstring": "Return the last argument with the given name.\n\n Return None if no argument with that name is found.", "id": "f8130:c0:m4"} {"signature": "def del_arg(self, name: str) -> None:", "body": "for arg in reversed(self.arguments):if arg.name.strip(WS) == name.strip(WS):del arg[:]", "docstring": "Delete all arguments with the given then.", "id": "f8130:c0:m6"} {"signature": "def parse_pm_pf_tl(byte_array: bytearray, start: int, end: Optional[int],parameter_spans_append: Callable,pfunction_spans_append: Callable,template_spans_append: Callable,) -> None:", "body": "while True:for m in SINGLE_BRACES_FINDITER(byte_array, start, end):byte_array[m.start()] = match = Nonefor match in PM_PF_TL_FINDITER(byte_array, start, end):ms, me = match.span()if match[] is not None:parameter_spans_append([ms, me])elif match[] is not None:pfunction_spans_append([ms, me])elif match[] is not None: byte_array[ms:me] = b'' * (me - ms)continueelse:template_spans_append([ms, me])byte_array[ms:me] = b'' * (me - ms)if match is None:return", "docstring": "Find the spans of parameters, parser functions, and templates.\n\n :byte_array: The byte_array or part of byte_array that is being parsed.\n :start: Add to every returned start.\n\n This is the innermost loop of the parse_to_spans function.\n If the byte_array passed to parse_to_spans contains n WikiLinks, then\n this function will be called n + 1 times. 
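The `get_arg` and `del_arg` records above (ids f8130:c0:m4 and f8130:c0:m6) both resolve a template argument by name with last-one-wins semantics, comparing whitespace-stripped names. A self-contained sketch of that lookup, using a plain stand-in class instead of the library's Argument type:

```python
class Arg:
    def __init__(self, name, value):
        self.name, self.value = name, value

def get_last_arg(name, arguments):
    """Return the last argument whose stripped name matches, or None."""
    for arg in reversed(list(arguments)):
        if arg.name.strip() == name.strip():
            return arg
    return None

args = [Arg(" a ", "1"), Arg("b", "2"), Arg("a", "3")]
print(get_last_arg("a", args).value)   # '3' -- the last duplicate wins, as in wikitext templates
```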
One time for the whole byte_array\n and n times for each of the n WikiLinks.", "id": "f8131:m2"} {"signature": "def parse_to_spans(byte_array: bytearray) -> Dict[str, List[List[int]]]:", "body": "comment_spans = [] comment_spans_append = comment_spans.appendextension_tag_spans = [] extension_tag_spans_append = extension_tag_spans.appendwikilink_spans = [] wikilink_spans_append = wikilink_spans.appendparameter_spans = [] parameter_spans_append = parameter_spans.appendparser_function_spans = [] parser_function_spans_append = parser_function_spans.appendtemplate_spans = [] template_spans_append = template_spans.appendfor match in COMMENT_FINDITER(byte_array):ms, me = match.span()comment_spans_append([ms, me])byte_array[ms:me] = b'' * (me - ms)for match in EXTENSION_TAGS_FINDITER(byte_array):ms, me = match.span()extension_tag_spans_append([ms, me])if match[]: parse_tag_extensions(byte_array, ms, me,wikilink_spans_append,parameter_spans_append,parser_function_spans_append,template_spans_append)byte_array[ms:me] = b'' * (me - ms)while True:match = Nonefor match in WIKILINK_FINDITER(byte_array):ms, me = match.span()wikilink_spans_append([ms, me])parse_pm_pf_tl(byte_array, ms, me,parameter_spans_append,parser_function_spans_append,template_spans_append)byte_array[ms:me] = b'' * (me - ms)if match is None:breakparse_pm_pf_tl(byte_array, , None,parameter_spans_append,parser_function_spans_append,template_spans_append)return {'': sorted(comment_spans),'': sorted(extension_tag_spans),'': sorted(parameter_spans),'': sorted(parser_function_spans),'': sorted(template_spans),'': sorted(wikilink_spans)}", "docstring": "Calculate and set self._type_to_spans.\n\n The result is a dictionary containing lists of spans:\n {\n 'Comment': comment_spans,\n 'ExtTag': extension_tag_spans,\n 'Parameter': parameter_spans,\n 'ParserFunction': parser_function_spans,\n 'Template': template_spans,\n 'WikiLink': wikilink_spans,\n }", "id": "f8131:m0"} {"signature": "def append_default(self, new_default_name: str) -> None:", "body": "stripped_default_name = new_default_name.strip(WS)if stripped_default_name == self.name.strip(WS):returndig = Trueinnermost_param = selfwhile dig:dig = Falsedefault = innermost_param.defaultfor p in innermost_param.parameters:if p.string == default:if stripped_default_name == p.name.strip(WS):returninnermost_param = pdig = Trueinnermost_default = innermost_param.defaultif innermost_default is None:innermost_param.insert(-, '' + new_default_name + '')else:name = innermost_param.nameinnermost_param[len('' + name + ''):len('' + name + '' + innermost_default)] = '' + new_default_name + '' + innermost_default + ''", "docstring": "Append a new default parameter in the appropriate place.\n\n Add the new default to the innter-most parameter.\n If the parameter already exists among defaults, don't change anything.\n\n Example:\n >>> p = Parameter('{{{p1|{{{p2|}}}}}}')\n >>> p.append_default('p3')\n >>> p\n Parameter(\"'{{{p1|{{{p2|{{{p3|}}}}}}}}}'\")", "id": "f8132:c0:m5"} {"signature": "@text.setterdef text(self, newtext: str) -> None:", "body": "string = self.stringif string[] == '':text = self.textif text:self[-len(text) - :-] = newtextreturnself.insert(-, '' + newtext)returnself.insert(len(string), '' + newtext + '')self.insert(, '')", "docstring": "Set a new text.\n\n Automatically put the ExternalLink in brackets if it's not already.", "id": "f8133:c0:m3"} {"signature": "@propertydef in_brackets(self) -> bool:", "body": "return self[] == ''", "docstring": "Return true if the ExternalLink is in 
brackets. False otherwise.", "id": "f8133:c0:m4"} {"signature": "@propertydef url(self) -> str:", "body": "if self[] == '':return self[:URL_MATCH(self._ext_link_shadow, ).end()]return self.string", "docstring": "Return the url.", "id": "f8133:c0:m0"} {"signature": "@contents.setterdef contents(self, value: str) -> None:", "body": "level = self.levelif level == :self[:] = valuereturncontents = self.contentsstart = level + len(self.title) + level + self[start:start + len(contents)] = value", "docstring": "Set value as the contents of this section.", "id": "f8135:c0:m5"} {"signature": "@propertydef title(self) -> str:", "body": "level = self.levelif level == :return ''return self._atomic_partition()[].rstrip(WS)[level:-level]", "docstring": "Return title of this section. Return '' for lead sections.", "id": "f8135:c0:m2"} {"signature": "@level.setterdef level(self, value: int) -> None:", "body": "old_level = self.leveltitle = self.titlenew_equals = '' * valueself[:old_level + len(title) + old_level] =new_equals + title + new_equals", "docstring": "Change level of this section.", "id": "f8135:c0:m1"} {"signature": "@propertydef level(self) -> int:", "body": "m = HEADER_MATCH(self._shadow)if m:return len(m.group())return ", "docstring": "Return level of this section.\n\n Level is in range(1,7) or 0 for the lead section.", "id": "f8135:c0:m0"} {"signature": "def _lstrip_increase(shadow: bytearray, pos: int) -> int:", "body": "length = len(shadow)while pos < length and shadow[pos:pos + ].isspace():pos += return pos", "docstring": "Return the new position to lstrip the shadow.", "id": "f8136:m1"} {"signature": "@propertydef arguments(self) -> List[Argument]:", "body": "shadow = self._shadowsplit_spans = self._args_matcher(shadow).spans('')if not split_spans:return []arguments = []arguments_append = arguments.appendtype_to_spans = self._type_to_spansss, se = span = self._spantype_ = id(span)lststr = self._lststrstring = lststr[]arg_spans = type_to_spans.setdefault(type_, [])span_tuple_to_span_get = {(s[], s[]): s for s in arg_spans}.getfor arg_self_start, arg_self_end in split_spans:s, e = arg_span = [ss + arg_self_start, ss + arg_self_end]old_span = span_tuple_to_span_get((s, e))if old_span is None:insort(arg_spans, arg_span)else:arg_span = old_spanarg = Argument(lststr, type_to_spans, arg_span, type_)arg._shadow_cache = (string[s:e], shadow[arg_self_start:arg_self_end])arguments_append(arg)return arguments", "docstring": "Parse template content. Create self.name and self.arguments.", "id": "f8137:c0:m0"} {"signature": "def __delitem__(self, key: Union[slice, int]) -> None:", "body": "start, stop = self._check_index(key)lststr = self._lststrlststr0 = lststr[]lststr[] = lststr0[:start] + lststr0[stop:]self._shrink_update(start, stop)", "docstring": "Remove the specified range or character from self.string.\n\n Note: If an operation involves both insertion and deletion, it'll be\n safer to use the `insert` function first. 
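The `Section.level` records above (ids f8135:c0:m0 and f8135:c0:m1) derive a section's level from the number of leading `=` characters in its heading, with 0 meaning the lead section. The library's `HEADER_MATCH` pattern is elided, so the regex below is only an illustrative stand-in:

```python
import re

HEADER_MATCH = re.compile(rb"^(={1,6})[^=].*?\1[ \t]*$").match   # assumed pattern

def section_level(shadow: bytes) -> int:
    """Return 1-6 for '== Title ==' style headings, 0 for the lead section."""
    m = HEADER_MATCH(shadow)
    return len(m.group(1)) if m else 0

print(section_level(b"== History =="))    # 2
print(section_level(b"Lead paragraph"))   # 0
```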
Otherwise there is a\n possibility of insertion into the wrong spans.", "id": "f8138:c0:m8"} {"signature": "def __init__(self,string: Union[MutableSequence[str], str],_type_to_spans: Dict[str, List[List[int]]] = None,) -> None:", "body": "if _type_to_spans is not None:self._type_to_spans = _type_to_spansself._lststr = string returnself._lststr = [string]span = self._span = [, len(string)]byte_array = bytearray(string, '', '')_type = self._typeif _type not in SPAN_PARSER_TYPES:type_to_spans = self._type_to_spans = parse_to_spans(byte_array)type_to_spans[_type] = [span]self._shadow_cache = string, byte_arrayelse:head = byte_array[:]tail = byte_array[-:]byte_array[-:] = byte_array[:] = b''type_to_spans = parse_to_spans(byte_array)self._shadow_cache = string, byte_arraytype_to_spans[_type].insert(, span)self._type_to_spans = type_to_spansbyte_array[:] = headbyte_array[-:] = tail", "docstring": "Initialize the object.\n\n Set the initial values for self._lststr, self._type_to_spans.\n\n :param string: The string to be parsed or a list containing the string\n of the parent object.\n :param _type_to_spans: If the lststr is already parsed, pass its\n _type_to_spans property as _type_to_spans to avoid parsing it\n again.", "id": "f8138:c0:m0"} {"signature": "@propertydef tables(self) -> List['']:", "body": "tables = [] tables_append = tables.appendtype_to_spans = self._type_to_spanslststr = self._lststrshadow = self._shadow[:]ss, se = self._spanspans = type_to_spans.setdefault('', [])if not spans:m = True while m:m = Falsefor m in TABLE_FINDITER(shadow):ms, me = m.span()span = [ss + ms + len(m[]), ss + me]spans.append(span)tables_append(Table(lststr, type_to_spans, span, ''))shadow[ms:me] = b'' * (me - ms)return tablesspan_tuple_to_span_get = {(s[], s[]): s for s in spans}.getm = Truewhile m:m = Falsefor m in TABLE_FINDITER(shadow):ms, me = m.span()s, e = ss + ms + len(m[]), ss + meold_span = span_tuple_to_span_get((s, e))if old_span is None:span = [s, e]insort(spans, span)else:span = old_spantables_append(Table(lststr, type_to_spans, span, ''))shadow[ms:me] = b'' * (me - ms)return tables", "docstring": "Return a list of found table objects.", "id": "f8138:c0:m32"} {"signature": "def __contains__(self, value: Union[str, '']) -> bool:", "body": "if isinstance(value, str):return value in self.stringif self._lststr is not value._lststr:return Falseps, pe = value._spanss, se = self._spanif ss <= ps and se >= pe:return Truereturn False", "docstring": "Return True if parsed_wikitext is inside self. 
False otherwise.\n\n Also self and parsed_wikitext should belong to the same parsed\n wikitext object for this function to return True.", "id": "f8138:c0:m3"} {"signature": "@staticmethoddef parent(type_: Optional[str] = None) -> None:", "body": "return None", "docstring": "Return None (The parent of the root node is None).", "id": "f8138:c0:m36"} {"signature": "@propertydef sections(self) -> List['']:", "body": "sections = [] sections_append = sections.appendtype_to_spans = self._type_to_spanslststr = self._lststrss, se = _span = self._spantype_spans = type_to_spans.setdefault('', [])full_match = SECTIONS_FULLMATCH(self._shadow)section_spans = full_match.spans('')levels = [len(eq) for eq in full_match.captures('')]if not type_spans:spans_append = type_spans.appendfor current_index, (current_level, (s, e)) in enumerate(zip(levels, section_spans), ):for section_index, section_level in enumerate(levels[current_index:], current_index):if current_level and section_level > current_level:e = section_spans[section_index][]else:breakspan = [ss + s, ss + e]spans_append(span)sections_append(Section(lststr, type_to_spans, span, ''))return sectionsspan_tuple_to_span = {(s[], s[]): s for s in type_spans}.getfor current_index, (current_level, (s, e)) in enumerate(zip(levels, section_spans), ):for section_index, section_level in enumerate(levels[current_index:], current_index):if current_level and section_level > current_level:e = section_spans[section_index][]else:breaks, e = ss + s, ss + eold_span = span_tuple_to_span((s, e))if old_span is None:span = [s, e]insort(type_spans, span)else:span = old_spansections_append(Section(lststr, type_to_spans, span, ''))return sections", "docstring": "Return a list of section in current wikitext.\n\n The first section will always be the lead section, even if it is an\n empty string.", "id": "f8138:c0:m31"} {"signature": "def _subspans(self, _type: str) -> Generator[int, None, None]:", "body": "ss, se = self._spanspans = self._type_to_spans[_type]b = bisect(spans, [ss])for span in spans[b:bisect(spans, [se], b)]:if span[] <= se:yield span", "docstring": "Yield all the sub-span indices excluding self._span.", "id": "f8138:c1:m1"} {"signature": "def __repr__(self) -> str:", "body": "return ''.format(type(self).__name__, repr(self.string))", "docstring": "Return the string representation of self.", "id": "f8138:c0:m2"} {"signature": "def __str__(self) -> str:", "body": "return self.string", "docstring": "Return self-object as a string.", "id": "f8138:c0:m1"} {"signature": "def _subspans(self, type_: str) -> List[List[int]]:", "body": "return self._type_to_spans[type_]", "docstring": "Return all the sub-span including self._span.", "id": "f8138:c0:m15"} {"signature": "def pprint(self, indent: str = '', remove_comments=False):", "body": "warn('',DeprecationWarning,)return self.pformat(indent, remove_comments)", "docstring": "Deprecated, use self.pformat instead.", "id": "f8138:c0:m23"} {"signature": "def _shrink_update(self, rmstart: int, rmstop: int) -> None:", "body": "for spans in self._type_to_spans.values():i = len(spans) - while i >= :s, e = span = spans[i]if rmstop <= s:rmlength = rmstop - rmstartspan[:] = s - rmlength, e - rmlengthi -= continuebreakelse:continuewhile True:if rmstart <= s:if rmstop < e:span[:] = rmstart, e + rmstart - rmstopi -= if i < :breaks, e = span = spans[i]continuespans.pop(i)[:] = -, -i -= if i < :breaks, e = span = spans[i]continuebreakwhile i >= :if e <= rmstart:i -= if i < :breaks, e = span = spans[i]continuespan[] -= rmstop - rmstarti 
-= if i < :breaks, e = span = spans[i]continue", "docstring": "Update self._type_to_spans according to the removed span.\n\n Warning: If an operation involves both _shrink_update and\n _insert_update, you might wanna consider doing the\n _insert_update before the _shrink_update as this function\n can cause data loss in self._type_to_spans.", "id": "f8138:c0:m17"} {"signature": "@string.deleterdef string(self) -> None:", "body": "del self[:]", "docstring": "Set a new string for this object. Note the old data will be lost.", "id": "f8138:c0:m13"} {"signature": "@contents.setterdef contents(self, contents: str) -> None:", "body": "match = self._matchstart, end = match.span('')if start != -:self[start:end] = contentselse:s, e = match.span('')self[s:e] = ''.format(contents, match[''].decode())", "docstring": "Set new contents.\n\n Note that if the tag is self-closing, then it will be expanded to\n have a start tag and an end tag. For example:\n >>> t = Tag('')\n >>> t.contents = 'n'\n >>> t.string\n 'n'", "id": "f8139:c1:m4"} {"signature": "@propertydef _match(self) -> Any:", "body": "cached_match, cached_string = self._match_cachestring = self.stringif cached_string == string:return cached_matchmatch = TAG_FULLMATCH(self._shadow)self._match_cache = match, stringreturn match", "docstring": "Return the match object for the current tag. Cache the result.", "id": "f8139:c1:m0"} {"signature": "@propertydef name(self) -> str:", "body": "return self._match[''].decode()", "docstring": "Return tag name.", "id": "f8139:c1:m1"} {"signature": "@propertydef items(self) -> List[str]:", "body": "items = [] append = items.appendstring = self.stringmatch = self._matchms = match.start()for s, e in match.spans(''):append(string[s - ms:e - ms])return items", "docstring": "Return items as a list of strings.\n\n Don't include sub-items and the start pattern.", "id": "f8140:c0:m2"} {"signature": "@classmethoddef readonly(cls, *args, **kwargs):", "body": "fridge = cls(*args, **kwargs)fridge.close()return fridge", "docstring": "Return an already closed read-only instance of Fridge.\nArguments are the same as for the constructor.", "id": "f8144:c0:m0"} {"signature": "def compare_waterfall_fil_to_h5_methods_and_attributes():", "body": "a = bl.Waterfall(voyager_h5)b = bl.Waterfall(voyager_fil)print(\"\")assert a.beam_axis == b.beam_axisassert a.freq_axis == b.freq_axisassert a.time_axis == b.time_axisassert a.calc_n_coarse_chan() == b.calc_n_coarse_chan()assert a.file_shape == b.file_shapeassert a.n_channels_in_file == b.n_channels_in_fileassert a.n_ints_in_file == b.n_ints_in_fileassert a.selection_shape == b.selection_shapeprint(\"\")a.populate_freqs()a.populate_timestamps()a.info()a.blank_dc()a.calibrate_band_pass_N1()b.populate_freqs()b.populate_timestamps()b.info()b.blank_dc()b.calibrate_band_pass_N1()dir_a = dir(a)dir_b = dir(b)print(\"\")for item in dir_a:if item not in dir_b:print(item)print(\"\")for item in dir_b:if item not in dir_a:print(item)", "docstring": "Compare attributes and check methods", "id": "f8156:m1"} {"signature": "def find_header_size(filename):", "body": "filfile=open(filename,'')filfile.seek()round1 = filfile.read()headersize = round1.find('')+len('')return headersize", "docstring": "Script to find the header size of a filterbank file", "id": "f8160:m2"} {"signature": "def write_polfils(str, str_I, **kwargs):", "body": "lin,circ=fracpols(str, **kwargs)obs = Waterfall(str_I, max_load=)obs.data = linobs.write_to_fil(str[:-]+'') obs.data = circobs.write_to_fil(str[:-]+'')", "docstring": "Writes 
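The `find_header_size` record above (id f8160:m2) locates where the data block starts in a SIGPROC filterbank file. Its string literals are elided, but SIGPROC headers end with the literal `HEADER_END` marker, so a sketch based on that convention looks like this (the probe size is an assumption):

```python
def find_header_size(filename, probe_bytes=2048):
    """Return the byte offset just past the SIGPROC HEADER_END marker."""
    with open(filename, "rb") as fh:
        chunk = fh.read(probe_bytes)
    marker = b"HEADER_END"
    idx = chunk.find(marker)
    if idx < 0:
        raise ValueError("HEADER_END not found in the first %d bytes" % probe_bytes)
    return idx + len(marker)
```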
two new filterbank files containing fractional linear and\n circular polarization data", "id": "f8161:m8"} {"signature": "def convert_to_coarse(data,chan_per_coarse):", "body": "num_coarse = data.size/chan_per_coarsedata_shaped = np.array(np.reshape(data,(num_coarse,chan_per_coarse)))return np.mean(data_shaped[:,:-],axis=)", "docstring": "Converts a data array with length n_chans to an array of length n_coarse_chans\nby averaging over the coarse channels", "id": "f8161:m1"} {"signature": "def fracpols(str, **kwargs):", "body": "I,Q,U,V,L=get_stokes(str, **kwargs)return L/I,V/I", "docstring": "Output fractional linear and circular polarizations for a\n rawspec cross polarization .fil file. NOT STANDARD USE", "id": "f8161:m6"} {"signature": "def plot_fullcalib(dio_cross,feedtype='',**kwargs):", "body": "plt.figure(\"\", figsize=(,))left, width = ,bottom, height = ,width2 = bottom2, height2 = ,rect_uncal = [left,bottom,width,height]rect_cal = [left+width+,bottom,width,height]rect_fold = [left,bottom2,width2,]rect_gain1 = [left+width2+,bottom2,width2,height2]rect_phase1 = [left+width2*+*,bottom2,width2,height2]rect_gain2 = [left+width2+,bottom2+height2+,width2,height2]rect_phase2 = [left+width2*+*,bottom2+height2+,width2,height2]axFold = plt.axes(rect_fold)print('')plot_diode_fold(dio_cross,bothfeeds=False,feedtype=feedtype,min_samp=,max_samp=,legend=False,**kwargs)print('')plot_gain_offsets(dio_cross,feedtype=feedtype,ax1=rect_gain2,ax2=rect_gain1,legend=False,**kwargs)print('')plot_phase_offsets(dio_cross,feedtype=feedtype,ax1=rect_phase1,ax2=rect_phase2,legend=False,**kwargs)plt.ylabel('')ax_uncal = plt.axes(rect_uncal)print('')plot_Stokes_diode(dio_cross,feedtype=feedtype,**kwargs)ax_cal = plt.axes(rect_cal,sharey=ax_uncal)print('')plot_calibrated_diode(dio_cross,feedtype=feedtype,**kwargs)plt.ylabel('')plt.setp(ax_cal.get_yticklabels(),visible=False)plt.savefig(dio_cross[:-]+'',dpi=)plt.show()", "docstring": "Generates and shows five plots: Uncalibrated diode, calibrated diode, fold information,\nphase offsets, and gain offsets for a noise diode measurement. 
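The `convert_to_coarse` record above (id f8161:m1) averages a fine-channel spectrum down to coarse channels by reshaping and taking a mean. The record also trims some edge channels within each coarse channel, but the trim width is elided, so it is omitted in this sketch:

```python
import numpy as np

def convert_to_coarse(data, chan_per_coarse):
    """Average a length-n_chans array down to n_chans // chan_per_coarse coarse channels."""
    num_coarse = data.size // chan_per_coarse
    shaped = data[: num_coarse * chan_per_coarse].reshape(num_coarse, chan_per_coarse)
    return shaped.mean(axis=1)

spec = np.arange(16.0)
print(convert_to_coarse(spec, 4))   # [ 1.5  5.5  9.5 13.5]
```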
Most useful diagnostic plot to\nmake sure calibration proceeds correctly.", "id": "f8164:m6"} {"signature": "def plot_calibrated_diode(dio_cross,chan_per_coarse=,feedtype='',**kwargs):", "body": "obs = Waterfall(dio_cross,max_load=)freqs = obs.populate_freqs()tsamp = obs.header['']data = obs.dataobs = NoneI,Q,U,V = get_stokes(data,feedtype)data = Nonepsis = phase_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)G = gain_offsets(I,Q,U,V,tsamp,chan_per_coarse,feedtype,**kwargs)I,Q,U,V = apply_Mueller(I,Q,U,V,G,psis,chan_per_coarse,feedtype)I_OFF,I_ON = foldcal(I,tsamp,**kwargs)Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)U_OFF,U_ON = foldcal(U,tsamp,**kwargs)V_OFF,V_ON = foldcal(V,tsamp,**kwargs)I = NoneQ = NoneU = NoneV = Noneplt.plot(freqs,I_ON-I_OFF,'',label='')plt.plot(freqs,Q_ON-Q_OFF,'',label='')plt.plot(freqs,U_ON-U_OFF,'',label='')plt.plot(freqs,V_ON-V_OFF,'',label='')plt.legend()plt.xlabel('')plt.title('')plt.ylabel('')", "docstring": "Plots the corrected noise diode spectrum for a given noise diode measurement\nafter application of the inverse Mueller matrix for the electronics chain.", "id": "f8164:m2"} {"signature": "def get_diff(dio_cross,feedtype,**kwargs):", "body": "obs = Waterfall(dio_cross,max_load=)freqs = obs.populate_freqs()tsamp = obs.header['']data = obs.dataobs = NoneI,Q,U,V = get_stokes(data,feedtype)I_OFF,I_ON = foldcal(I,tsamp,**kwargs)Q_OFF,Q_ON = foldcal(Q,tsamp,**kwargs)U_OFF,U_ON = foldcal(U,tsamp,**kwargs)V_OFF,V_ON = foldcal(V,tsamp,**kwargs)Idiff = I_ON-I_OFFQdiff = Q_ON-Q_OFFUdiff = U_ON-U_OFFVdiff = V_ON-V_OFFreturn Idiff,Qdiff,Udiff,Vdiff,freqs", "docstring": "Returns ON-OFF for all Stokes parameters given a cross_pols noise diode measurement", "id": "f8164:m0"} {"signature": "def read_header(self):", "body": "start_idx = self.file_obj.tell()key, val = '', ''header_dict = {}keep_reading = Truefirst_line = self.file_objtry:while keep_reading:if start_idx + > self.filesize:keep_reading = Falseraise EndOfFileError(\"\")line = self.file_obj.read()if PYTHON3:line = line.decode(\"\")if line.startswith(''):keep_reading = Falsebreakelse:key, val = line.split('')key, val = key.strip(), val.strip()if \"\" in val:val = str(val.strip(\"\").strip())elif \"\" in val:val = float(val)else:val = int(val)header_dict[key] = valexcept ValueError:print(\"\", line)print(\"\", start_idx)print(\"\", self.filesize)print(\"\")print(self.file_obj.read())raisedata_idx = self.file_obj.tell()if \"\" in header_dict.keys():if int(header_dict[\"\"]) == :if data_idx % :data_idx += ( - data_idx % )self.file_obj.seek(start_idx)return header_dict, data_idx", "docstring": "Read next header (multiple headers in file)\n\n Returns:\n (header, data_idx) - a dictionary of keyword:value header data and\n also the byte index of where the corresponding data block resides.", "id": "f8167:c1:m4"} {"signature": "def generate_filterbank_header(self, nchans=, ):", "body": "gp_head = self.read_first_header()fb_head = {}telescope_str = gp_head.get(\"\", \"\")if telescope_str in ('', ''):fb_head[\"\"] = elif telescope_str in ('', ''):fb_head[\"\"] = else:fb_head[\"\"] = fb_head[\"\"] = gp_head.get(\"\", \"\")fb_head[\"\"] = gp_head.get(\"\", )fb_head[\"\"] = gp_head.get(\"\", )fb_head[\"\"] = Angle(str(gp_head.get(\"\", )) + \"\")fb_head[\"\"] = Angle(str(gp_head.get(\"\", )) + \"\")fb_head[\"\"] = self.filenamefb_head[\"\"] = fb_head[\"\"] = fb_head[\"\"] = fb_head[\"\"] = fb_head[\"\"] = fb_head[\"\"] = fb_head[\"\"] = fb_head[\"\"] = fb_head[\"\"] = / nchansfb_head[\"\"] = nchansfb_head[\"\"] = 
fb_head[\"\"] = return fb_head", "docstring": "Generate a blimpy header dictionary", "id": "f8167:c1:m16"} {"signature": "def read_first_header(self):", "body": "self.file_obj.seek()header_dict, pos = self.read_header()self.file_obj.seek()return header_dict", "docstring": "Read first header in file\n\n Returns:\n header (dict): keyword:value pairs of header metadata", "id": "f8167:c1:m5"} {"signature": "def __exit__(self, exception_type, exception_value, traceback):", "body": "self.file_obj.close()", "docstring": "closes the file after `with` block has exited\n:param exception_type:\n:param exception_value:\n:param traceback:\n:return:", "id": "f8167:c1:m2"} {"signature": "def plot_spectrum(self, filename=None, plot_db=True):", "body": "header, data = self.read_next_data_block()print(\"\")d_xx_fft = np.abs(np.fft.fft(data[..., ]))d_xx_fft = d_xx_fft.flatten()dec_fac_x = if d_xx_fft.shape[] > MAX_PLT_POINTS:dec_fac_x = d_xx_fft.shape[] / MAX_PLT_POINTSd_xx_fft = rebin(d_xx_fft, dec_fac_x)print(\"\")if plot_db:plt.plot( * np.log10(d_xx_fft))plt.ylabel(\"\")else:plt.plot(d_xx_fft)plt.ylabel(\"\")plt.xlabel(\"\")plt.title(self.filename)if filename:plt.savefig(filename)plt.show()", "docstring": "Do a (slow) numpy FFT and take power of data", "id": "f8167:c1:m15"} {"signature": "def __get_blob_dimensions(self, chunk_dim):", "body": "if self.selection_shape[self.freq_axis] > chunk_dim[self.freq_axis]*MAX_BLOB_MB:freq_axis_size = self.selection_shape[self.freq_axis]while freq_axis_size > chunk_dim[self.freq_axis]*MAX_BLOB_MB:freq_axis_size /= time_axis_size = else:freq_axis_size = self.selection_shape[self.freq_axis]time_axis_size = np.min([chunk_dim[self.time_axis] * MAX_BLOB_MB * chunk_dim[self.freq_axis] / freq_axis_size, self.selection_shape[self.time_axis]])blob_dim = (int(time_axis_size), , freq_axis_size)return blob_dim", "docstring": "Sets the blob dimmentions, trying to read around 1024 MiB at a time.\n This is assuming a chunk is about 1 MiB.", "id": "f8168:c0:m13"} {"signature": "def __get_chunk_dimensions(self):", "body": "if np.abs(self.header[b'']) < :logger.info('')chunk_dim = (,,) return chunk_dimelif np.abs(self.header[b'']) < :logger.info('')chunk_dim = (,,) return chunk_dimelif np.abs(self.header[b'']) < and np.abs(self.header[b'']) >= :logger.info('')chunk_dim = (,,) chunk_dim = (,,/)return chunk_dimelse:logger.warning('')chunk_dim = (,,)return chunk_dim", "docstring": "Sets the chunking dimmentions depending on the file type.", "id": "f8168:c0:m14"} {"signature": "def __write_to_fil_heavy(self, filename_out, *args, **kwargs):", "body": "chunk_dim = self.__get_chunk_dimensions()blob_dim = self.__get_blob_dimensions(chunk_dim)n_blobs = self.container.calc_n_blobs(blob_dim)n_bytes = self.header[b''] / with open(filename_out, \"\") as fileh:fileh.write(generate_sigproc_header(self)) logger.info(''% n_blobs)for ii in range(, n_blobs):logger.info('' % (ii + , n_blobs))bob = self.container.read_blob(blob_dim,n_blob=ii)with open(filename_out, \"\") as fileh:j = bobif n_bytes == :np.float32(j.ravel()).tofile(fileh)elif n_bytes == :np.int16(j.ravel()).tofile(fileh)elif n_bytes == :np.int8(j.ravel()).tofile(fileh)", "docstring": "Write data to .fil file.\n\n Args:\n filename_out (str): Name of output file", "id": "f8168:c0:m8"} {"signature": "def calc_n_coarse_chan(self, chan_bw=None):", "body": "n_coarse_chan = self.container.calc_n_coarse_chan(chan_bw)return n_coarse_chan", "docstring": "This makes an attempt to calculate the number of coarse channels in a given freq selection.\n It 
assumes for now that a single coarse channel is 2.9296875 MHz", "id": "f8168:c0:m15"} {"signature": "def __write_to_hdf5_light(self, filename_out, *args, **kwargs):", "body": "block_size = with h5py.File(filename_out, '') as h5:h5.attrs[b''] = b''h5.attrs[b''] = b''if HAS_BITSHUFFLE:bs_compression = bitshuffle.h5.H5FILTERbs_compression_opts = (block_size, bitshuffle.h5.H5_COMPRESS_LZ4)else:bs_compression = Nonebs_compression_opts = Nonelogger.warning(\"\")dset = h5.create_dataset('',data=self.data,compression='')compression=bs_compression,compression_opts=bs_compression_opts)dset_mask = h5.create_dataset('',shape=self.file_shape,compression='',compression=bs_compression,compression_opts=bs_compression_opts,dtype='')dset.dims[].label = b\"\"dset.dims[].label = b\"\"dset.dims[].label = b\"\"dset_mask.dims[].label = b\"\"dset_mask.dims[].label = b\"\"dset_mask.dims[].label = b\"\"for key, value in self.header.items():dset.attrs[key] = value", "docstring": "Write data to HDF5 file in one go.\n\n Args:\n filename_out (str): Name of output file", "id": "f8168:c0:m12"} {"signature": "def grab_data(self, f_start=None, f_stop=None,t_start=None, t_stop=None, if_id=):", "body": "self.freqs = self.populate_freqs()self.timestamps = self.populate_timestamps()if f_start is None:f_start = self.freqs[]if f_stop is None:f_stop = self.freqs[-]i0 = np.argmin(np.abs(self.freqs - f_start))i1 = np.argmin(np.abs(self.freqs - f_stop))if i0 < i1:plot_f = self.freqs[i0:i1 + ]plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + ])else:plot_f = self.freqs[i1:i0 + ]plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + ])return plot_f, plot_data", "docstring": "Extract a portion of data by frequency range.\n\n Args:\n f_start (float): start frequency in MHz\n f_stop (float): stop frequency in MHz\n if_id (int): IF input identification (req. 
when multiple IFs in file)\n\n Returns:\n (freqs, data) (np.arrays): frequency axis in MHz and data subset", "id": "f8168:c0:m16"} {"signature": "def populate_timestamps(self,update_header=False):", "body": "ii_start, ii_stop = , self.n_ints_in_fileif self.t_start:ii_start = self.t_startif self.t_stop:ii_stop = self.t_stopt0 = self.header[b'']t_delt = self.header[b'']if update_header:timestamps = ii_start * t_delt / // + t0else:timestamps = np.arange(ii_start, ii_stop) * t_delt / // + t0return timestamps", "docstring": "Populate time axis.\n IF update_header then only return tstart", "id": "f8172:c0:m8"} {"signature": "def _find_blob_start(self):", "body": "self._setup_chans()blob_time_start = self.t_startblob_freq_start = self.chan_start_idxblob_start = blob_time_start * self.n_channels_in_file + blob_freq_startreturn blob_start", "docstring": "Find first blob from selection.", "id": "f8172:c2:m4"} {"signature": "def _setup_selection_range(self, f_start=None, f_stop=None, t_start=None, t_stop=None, init=False):", "body": "if init is True:if t_start is None:t_start = self.t_beginif t_stop is None:t_stop = self.t_endif f_start is None:f_start = self.f_beginif f_stop is None:f_stop = self.f_endelse:if f_start is None:f_start = self.f_startif f_stop is None:f_stop = self.f_stopif t_start is None:t_start = self.t_startif t_stop is None:t_stop = self.t_stopif t_stop >= and t_start >= and t_stop < t_start:t_stop, t_start = t_start,t_stoplogger.warning('')if f_stop and f_start and f_stop < f_start:f_stop, f_start = f_start,f_stoplogger.warning('')if t_start >= self.t_begin and t_start < self.t_end:self.t_start = int(t_start)else:if init is False or t_start != None:logger.warning(''%self.t_begin)self.t_start = self.t_beginif t_stop <= self.t_end and t_stop > self.t_begin:self.t_stop = int(t_stop)else:if init is False or t_stop:logger.warning(''%self.t_end)self.t_stop = self.t_endif f_start >= self.f_begin and f_start < self.f_end:self.f_start = f_startelse:if init is False or f_start:logger.warning(''%self.f_begin)self.f_start = self.f_beginif f_stop <= self.f_end and f_stop > self.f_begin:self.f_stop = f_stopelse:if init is False or f_stop:logger.warning(''%self.f_end)self.f_stop = self.f_endself.selection_shape = self._calc_selection_shape()", "docstring": "Making sure the selection if time and frequency are within the file limits.\n\n Args:\n init (bool): If call during __init__", "id": "f8172:c0:m1"} {"signature": "def _setup_n_ints_in_file(self):", "body": "self.n_ints_in_file = sigproc.calc_n_ints_in_file(self.filename)", "docstring": "Calculate the number of integrations in the file.", "id": "f8172:c2:m1"} {"signature": "def cmd_tool(args=None):", "body": "from argparse import ArgumentParserif not HAS_BITSHUFFLE:print(\"\")exit()parser = ArgumentParser(description=\"\")parser.add_argument('', type=str, help='')args = parser.parse_args()fileroot = args.filename.split('')[]filelist = glob.glob(fileroot + '')filelist = sorted(filelist)r = GuppiRaw(filelist[])header, data = r.read_next_data_block()dshape = data.shape print(dshape)n_blocks_total = for filename in filelist:print(filename)r = GuppiRaw(filename)n_blocks_total += r.n_blocksprint(n_blocks_total)full_dshape = np.concatenate(((n_blocks_total,), dshape))h5 = h5py.File(fileroot + '', '')h5.attrs[''] = ''block_size = dset = h5.create_dataset('',shape=full_dshape,dtype=data.dtype) h5_idx = for filename in filelist:print(\"\" % filename)r = GuppiRaw(filename)h5 = h5py.File(filename + '', '')header, data = r.read_next_data_block()for ii in 
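The `populate_timestamps` record above (id f8172:c0:m8) builds the time axis from the header's start time and sample interval; its conversion constants are elided. The sketch below assumes the usual SIGPROC convention of `tstart` in MJD and `tsamp` in seconds, hence the 86400 s/day factor:

```python
import numpy as np

def populate_timestamps(t0_mjd, tsamp_s, t_start, t_stop):
    """Time axis in MJD for integrations in [t_start, t_stop)."""
    return np.arange(t_start, t_stop) * tsamp_s / (24.0 * 60.0 * 60.0) + t0_mjd

ts = populate_timestamps(t0_mjd=58000.0, tsamp_s=18.253611008, t_start=0, t_stop=4)
print(ts - 58000.0)   # per-sample offsets of about 0.000211 day
```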
range(, r.n_blocks):t0 = time.time()print(\"\" % (h5_idx+, full_dshape[]))header, data = r.read_next_data_block()t1 = time.time()t2 = time.time()print(\"\" % (h5_idx+, full_dshape[]))dset[h5_idx, :] = datat3 = time.time()print(\"\" % ((t1-t0), (t3-t2)))h5_idx += for key, value in header.items():dset.attrs[key] = valueh5.close()t1 = time.time()print(\"\" % (t1- t0))", "docstring": "Command line tool for converting guppi raw into HDF5 versions of guppi raw", "id": "f8173:m0"} {"signature": "def read_header(filename, return_idxs=False):", "body": "with open(filename, '') as fh:header_dict = {}header_idxs = {}keyword, value, idx = read_next_header_keyword(fh)try:assert keyword == b''except AssertionError:raise RuntimeError(\"\")while True:keyword, value, idx = read_next_header_keyword(fh)if keyword == b'':breakelse:header_dict[keyword] = valueheader_idxs[keyword] = idxif return_idxs:return header_idxselse:return header_dict", "docstring": "Read blimpy header and return a Python dictionary of key:value pairs\n\n Args:\n filename (str): name of file to open\n\n Optional args:\n return_idxs (bool): Default False. If true, returns the file offset indexes\n for values\n\n returns", "id": "f8174:m3"} {"signature": "def calc_n_ints_in_file(filename):", "body": "h = read_header(filename)n_bytes = int(h[b''] / )n_chans = h[b'']n_ifs = h[b'']idx_data = len_header(filename)f = open(filename, '')f.seek(idx_data)filesize = os.path.getsize(filename)n_bytes_data = filesize - idx_dataif h[b''] == :n_ints = int( * n_bytes_data / (n_chans * n_ifs))else:n_ints = int(n_bytes_data / (n_bytes * n_chans * n_ifs))return n_ints", "docstring": "Calculate number of integrations in a given file", "id": "f8174:m9"} {"signature": "def read_next_header_keyword(fh):", "body": "n_bytes = np.fromstring(fh.read(), dtype='')[]if n_bytes > :n_bytes = keyword = fh.read(n_bytes)if keyword == b'' or keyword == b'':return keyword, , fh.tell()else:dtype = header_keyword_types[keyword]idx = fh.tell()if dtype == b'':val = struct.unpack(dtype, fh.read())[]if dtype == b'':val = struct.unpack(dtype, fh.read())[]if dtype == b'':str_len = np.fromstring(fh.read(), dtype='')[]val = fh.read(str_len)if dtype == b'':val = struct.unpack('', fh.read())[]val = fil_double_to_angle(val)if keyword == b'':val = Angle(val, unit=u.hour)else:val = Angle(val, unit=u.deg)return keyword, val, idx", "docstring": "Args:\n fh (file): file handler\n\nReturns:", "id": "f8174:m1"} {"signature": "def unpack(data, nbit):", "body": "if nbit > :raise ValueError(\"\")if % nbit != :raise ValueError(\"\")if data.dtype not in (np.uint8, np.int8):raise TypeError(\"\")if nbit == :return dataelif nbit == :data = unpack_4to8(data)return dataelif nbit == :data = unpack_2to8(data)return dataelif nbit == :data = unpack_1to8(data)return data", "docstring": "upgrade data from nbits to 8bits\n\n Notes: Pretty sure this function is a little broken!", "id": "f8175:m4"} {"signature": "def rebin(d, n_x, n_y=None):", "body": "if d.ndim == :if n_y is None:n_y = if n_x is None:n_x = d = d[:int(d.shape[] // n_x) * n_x, :int(d.shape[] // n_y) * n_y]d = d.reshape((d.shape[] // n_x, n_x, d.shape[] // n_y, n_y))d = d.mean(axis=)d = d.mean(axis=)elif d.ndim == :d = d[:int(d.shape[] // n_x) * n_x]d = d.reshape((d.shape[] // n_x, n_x))d = d.mean(axis=)else:raise RuntimeError(\"\")return d", "docstring": "Rebin data by averaging bins together\n\n Args:\n d (np.array): data\n n_x (int): number of bins in x dir to rebin into one\n n_y (int): number of bins in y dir to rebin into one\n\n Returns:\n d: 
rebinned data with shape (n_x, n_y)", "id": "f8175:m3"} {"signature": "def compute_lst(self):", "body": "if self.header[b''] == :self.coords = gbt_coordselif self.header[b''] == :self.coords = parkes_coordselse:raise RuntimeError(\"\")if HAS_SLALIB:dut1 = mjd = self.header[b'']tellong = np.deg2rad(self.coords[])last = s.sla_gmst(mjd) - tellong + s.sla_eqeqx(mjd) + dut1if last < : last = last + *np.pireturn lastelse:raise RuntimeError(\"\")", "docstring": "Compute LST for observation", "id": "f8176:c0:m6"} {"signature": "def cmd_tool(args=None):", "body": "from argparse import ArgumentParserparser = ArgumentParser(description=\"\")parser.add_argument('', action='', default='', dest='', type=str,help='')parser.add_argument('', type=str,help='')parser.add_argument('', action='', default=None, dest='', type=float,help='')parser.add_argument('', action='', default=None, dest='', type=float,help='')parser.add_argument('', action='', default=None, dest='', type=int,help='')parser.add_argument('', action='', default=None, dest='', type=int,help='')parser.add_argument('', action='', default=False, dest='',help='')parser.add_argument('', action='', default=False, dest='',help='')parser.add_argument('', action='', default='', dest='', type=str,help='')parser.add_argument('', action='', default=False, dest='',help='')parser.add_argument('', action='', default=True, dest='',help='')parser.add_argument('', action='', default=False, dest='',help='')args = parser.parse_args()filename = args.filenameload_data = not args.info_onlywtp = args.what_to_plotif not wtp or '' in wtp:if args.t_start == None:t_start = else:t_start = args.t_startt_stop = t_start + if args.average:t_start = Nonet_stop = Noneelse:t_start = args.t_startt_stop = args.t_stopif args.info_only:args.blank_dc = Falseargs.calibrate_band_pass = Falsefil = Filterbank(filename, f_start=args.f_start, f_stop=args.f_stop,t_start=t_start, t_stop=t_stop,load_data=load_data,blank_dc=args.blank_dc,cal_band_pass=args.calibrate_band_pass)fil.info()if not args.info_only:if args.what_to_plot == \"\":plt.figure(\"\", figsize=(, ))fil.plot_waterfall(f_start=args.f_start, f_stop=args.f_stop)elif args.what_to_plot == \"\":plt.figure(\"\", figsize=(, ))fil.plot_spectrum(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='')elif args.what_to_plot == \"\":plt.figure(\"\", figsize=(, ))fil.plot_spectrum_min_max(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='')elif args.what_to_plot == \"\":plt.figure(\"\", figsize=(, ))fil.plot_kurtosis(f_start=args.f_start, f_stop=args.f_stop)elif args.what_to_plot == \"\":plt.figure(\"\", figsize=(, ))fil.plot_time_series(f_start=args.f_start, f_stop=args.f_stop,orientation='')elif args.what_to_plot == \"\":plt.figure(\"\", figsize=(, ),facecolor='')fil.plot_all(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='')elif args.what_to_plot == \"\":plt.figure(\"\", figsize=(, ),facecolor='')fil.plot_all(logged=True, f_start=args.f_start, f_stop=args.f_stop, t='',kurtosis=False)if args.plt_filename != '':plt.savefig(args.plt_filename)if not args.save_only:if '' in os.environ.keys():plt.show()else:print(\"\")", "docstring": "Command line tool for plotting and viewing info on filterbank files", "id": "f8176:m0"} {"signature": "def grab_data(self, f_start=None, f_stop=None, t_start=None, t_stop=None, if_id=):", "body": "if f_start is None:f_start = self.freqs[]if f_stop is None:f_stop = self.freqs[-]i0 = np.argmin(np.abs(self.freqs - f_start))i1 = np.argmin(np.abs(self.freqs - f_stop))if i0 < i1:plot_f = 
self.freqs[i0:i1 + ]plot_data = np.squeeze(self.data[t_start:t_stop, ..., i0:i1 + ])else:plot_f = self.freqs[i1:i0 + ]plot_data = np.squeeze(self.data[t_start:t_stop, ..., i1:i0 + ])return plot_f, plot_data", "docstring": "Extract a portion of data by frequency range.\n\n Args:\n f_start (float): start frequency in MHz\n f_stop (float): stop frequency in MHz\n if_id (int): IF input identification (req. when multiple IFs in file)\n\n Returns:\n (freqs, data) (np.arrays): frequency axis in MHz and data subset", "id": "f8176:c0:m11"} {"signature": "def _setup_time_axis(self, t_start=None, t_stop=None):", "body": "ii_start, ii_stop = , self.n_ints_in_fileif t_start:ii_start = t_startif t_stop:ii_stop = t_stopn_ints = ii_stop - ii_startt0 = self.header[b'']t_delt = self.header[b'']self.timestamps = np.arange(, n_ints) * t_delt / // + t0return ii_start, ii_stop, n_ints", "docstring": "Setup time axis.", "id": "f8176:c0:m4"} {"signature": "def _setup_freqs(self, f_start=None, f_stop=None):", "body": "f0 = self.header[b'']f_delt = self.header[b'']i_start, i_stop = , self.header[b'']if f_start:i_start = int((f_start - f0) / f_delt)if f_stop:i_stop = int((f_stop - f0) / f_delt)chan_start_idx = np.int(i_start)chan_stop_idx = np.int(i_stop)if i_start < i_stop:i_vals = np.arange(chan_start_idx, chan_stop_idx)else:i_vals = np.arange(chan_stop_idx, chan_start_idx)self.freqs = f_delt * i_vals + f0if f_delt < :self.freqs = self.freqs[::-]if chan_stop_idx < chan_start_idx:chan_stop_idx, chan_start_idx = chan_start_idx,chan_stop_idxreturn i_start, i_stop, chan_start_idx, chan_stop_idx", "docstring": "Setup frequency axis", "id": "f8176:c0:m3"} {"signature": "@sportsref.decorators.memoizedef get_team_ids(self):", "body": "df = self.team_stats_per_game()if not df.empty:return df.index.tolist()else:print('')return []", "docstring": "Returns a list of the team IDs for the given year.\n :returns: List of team IDs.", "id": "f8179:c0:m7"} {"signature": "@sportsref.decorators.memoizedef get_sub_doc(self, subpage):", "body": "html = sportsref.utils.get_html(self._subpage_url(subpage))return pq(html)", "docstring": "Returns PyQuery object for a given subpage URL.\n :subpage: The subpage of the season, e.g. 'per_game'.\n :returns: PyQuery object.", "id": "f8179:c0:m6"} {"signature": "@sportsref.decorators.memoizedef _get_player_stats_table(self, identifier):", "body": "doc = self.get_sub_doc(identifier)table = doc(''.format(identifier))df = sportsref.utils.parse_table(table)return df", "docstring": "Helper function for player season stats.\n\n :identifier: string identifying the type of stat, e.g. 
'per_game'.\n :returns: A DataFrame of stats.", "id": "f8179:c0:m22"} {"signature": "def standings(self):", "body": "doc = self.get_sub_doc('')east_table = doc('')east_df = pd.DataFrame(sportsref.utils.parse_table(east_table))east_df.sort_values('', ascending=False, inplace=True)east_df[''] = range(, len(east_df) + )east_df[''] = ''west_table = doc('')west_df = sportsref.utils.parse_table(west_table)west_df.sort_values('', ascending=False, inplace=True)west_df[''] = range(, len(west_df) + )west_df[''] = ''full_df = pd.concat([east_df, west_df], axis=).reset_index(drop=True)full_df[''] = full_df.team_id.str.extract(r'', expand=False)full_df[''] = [gb if isinstance(gb, int) or isinstance(gb, float) else for gb in full_df['']]full_df = full_df.drop('', axis=)expanded_table = doc('')expanded_df = sportsref.utils.parse_table(expanded_table)full_df = pd.merge(full_df, expanded_df, on='')return full_df", "docstring": "Returns a DataFrame containing standings information.", "id": "f8179:c0:m13"} {"signature": "def misc_stats(self):", "body": "return self._get_team_stats_table('')", "docstring": "Returns a Pandas DataFrame of miscellaneous stats about each team's\n season.", "id": "f8179:c0:m19"} {"signature": "def team_stats_totals(self):", "body": "return self._get_team_stats_table('')", "docstring": "Returns a Pandas DataFrame of each team's basic stat totals for the\n season.", "id": "f8179:c0:m17"} {"signature": "def player_stats_per_game(self):", "body": "return self._get_player_stats_table('')", "docstring": "Returns a DataFrame of per-game player stats for a season.", "id": "f8179:c0:m23"} {"signature": "def finals_loser(self):", "body": "raise NotImplementedError('')", "docstring": "Returns the team ID for the loser of that year's NBA Finals.\n :returns: 3-letter team ID for runner-up.", "id": "f8179:c0:m12"} {"signature": "@sportsref.decorators.memoizedef draft_year(self):", "body": "raise Exception('')", "docstring": "Returns the year the player was selected (or undrafted).\n :returns: TODO", "id": "f8180:c0:m14"} {"signature": "@sportsref.decorators.memoize@sportsref.decorators.kind_rpb(include_type=True)def gamelog_advanced(self, year, kind=''):", "body": "doc = self.get_sub_doc(''.format(year))table = (doc('')if kind == '' else doc(''))df = sportsref.utils.parse_table(table)return df", "docstring": "Returns a table of a player's advanced game-by-game stats for a\n season.\n\n :param year: The year representing the desired season.\n :param kind: specifies regular season, playoffs, or both. One of 'R',\n 'P', 'B'. 
Defaults to 'R'.\n :returns: A DataFrame of the player's advanced stats from each game of\n the season.\n :rtype: pd.DataFrame", "id": "f8180:c0:m24"} {"signature": "@sportsref.decorators.memoizedef age(self, year, month=, day=):", "body": "doc = self.get_main_doc()date_string = doc('').attr('')regex = r''date_args = map(int, re.match(regex, date_string).groups())birth_date = datetime.date(*date_args)age_date = datetime.date(year=year, month=month, day=day)delta = age_date - birth_dateage = delta.days / return age", "docstring": "Returns the age of the player on a given date.\n\n :year: int representing the year.\n :month: int representing the month (1-12).\n :day: int representing the day within the month (1-31).\n :returns: Age in years as a float.", "id": "f8180:c0:m8"} {"signature": "@sportsref.decorators.memoizedef name(self):", "body": "doc = self.get_main_doc()return doc('').text()", "docstring": "Returns the name of the player as a string.", "id": "f8180:c0:m7"} {"signature": "@sportsref.decorators.memoizedef away(self):", "body": "linescore = self.linescore()return linescore.loc['', '']", "docstring": "Returns away team ID.\n :returns: 3-character string representing away team's ID.", "id": "f8181:c0:m10"} {"signature": "@sportsref.decorators.memoizedef season(self):", "body": "d = self.date()if d.month >= :return d.year + else:return d.year", "docstring": "Returns the year ID of the season in which this game took place.\n\n:returns: An int representing the year of the season.", "id": "f8181:c0:m14"} {"signature": "@sportsref.decorators.memoizedef basic_stats(self):", "body": "return self._get_player_stats('')", "docstring": "Returns a DataFrame of basic player stats from the game.", "id": "f8181:c0:m16"} {"signature": "@sportsref.decorators.memoizedef home_score(self):", "body": "linescore = self.linescore()return linescore.loc['', '']", "docstring": "Returns score of the home team.\n :returns: int of the home score.", "id": "f8181:c0:m11"} {"signature": "@sportsref.decorators.memoizedef linescore(self):", "body": "doc = self.get_main_doc()table = doc('')columns = [th.text() for th in table('').items('')]columns[] = ''data = [[sportsref.utils.flatten_links(td) for td in tr('').items()]for tr in table('').next_all('').items()]return pd.DataFrame(data, index=['', ''],columns=columns, dtype='')", "docstring": "Returns the linescore for the game as a DataFrame.", "id": "f8181:c0:m8"} {"signature": "def _get_player_stats(self, table_id_fmt):", "body": "doc = self.get_main_doc()tms = self.away(), self.home()tm_ids = [table_id_fmt.format(tm) for tm in tms]tables = [doc(''.format(tm_id).lower()) for tm_id in tm_ids]dfs = [sportsref.utils.parse_table(table) for table in tables]for i, (tm, df) in enumerate(zip(tms, dfs)):no_time = df[''] == stat_cols = [col for col, dtype in df.dtypes.items()if dtype != '']df.loc[no_time, stat_cols] = df[''] = tmdf[''] = i == df[''] = [p < for p in range(df.shape[])]df.drop_duplicates(subset='', keep='', inplace=True)return pd.concat(dfs)", "docstring": "Returns a DataFrame of player stats from the game (either basic or\n advanced, depending on the argument.\n\n :param table_id_fmt: Format string for str.format with a placeholder\n for the team ID (e.g. 
'box_{}_basic')\n :returns: DataFrame of player stats", "id": "f8181:c0:m15"} {"signature": "@sportsref.decorators.memoizedef home(self):", "body": "linescore = self.linescore()return linescore.loc['', '']", "docstring": "Returns home team ID.\n :returns: 3-character string representing home team's ID.", "id": "f8181:c0:m9"} {"signature": "@sportsref.decorators.memoizedef team_ids_to_names(self):", "body": "return sportsref.nfl.teams.team_names(self.yr)", "docstring": "Mapping from 3-letter team IDs to full team names.\n\n :returns: Dictionary with team IDs as keys and full team strings as\n values.", "id": "f8187:c0:m8"} {"signature": "@sportsref.decorators.memoizedef team_names_to_ids(self):", "body": "return sportsref.nfl.teams.team_ids(self.yr)", "docstring": "Mapping from full team names to 3-letter team IDs.\n :returns: Dictionary with team names as keys and team IDs as values.", "id": "f8187:c0:m9"} {"signature": "@sportsref.decorators.memoize@sportsref.decorators.kind_rpb(include_type=True)def defense(self, kind=''):", "body": "doc = self.get_doc()table = (doc('') if kind == '' elsedoc(''))df = sportsref.utils.parse_table(table)return df", "docstring": "Gets yearly defense stats for the player (also has AV stats for OL).\n\n :kind: One of 'R', 'P', or 'B'. Case-insensitive; defaults to 'R'.\n :returns: Pandas DataFrame with rushing/receiving stats.", "id": "f8188:c0:m23"} {"signature": "@sportsref.decorators.memoizedef _simple_year_award(self, award_id):", "body": "doc = self.get_doc()table = doc(''.format(award_id))return list(map(int, sportsref.utils.parse_awards_table(table)))", "docstring": "Template for simple award functions that simply list years, such as\n pro bowls and first-team all pro.\n\n :award_id: The div ID that is appended to \"leaderboard_\" in selecting\n the table's div.\n :returns: List of years for the award.", "id": "f8188:c0:m30"} {"signature": "@sportsref.decorators.memoize@sportsref.decorators.kind_rpb(include_type=True)def gamelog(self, year=None, kind=''):", "body": "url = self._subpage_url('', None) doc = pq(sportsref.utils.get_html(url))table = (doc('') if kind == '' elsedoc(''))df = sportsref.utils.parse_table(table)if year is not None:df = df.query('').reset_index(drop=True)return df", "docstring": "Gets the career gamelog of the given player.\n :kind: One of 'R', 'P', or 'B' (for regular season, playoffs, or both).\n Case-insensitive; defaults to 'R'.\n :year: The year for which the gamelog should be returned; if None,\n return entire career gamelog. Defaults to None.\n :returns: A DataFrame with the player's career gamelog.", "id": "f8188:c0:m20"} {"signature": "@sportsref.decorators.memoize@sportsref.decorators.kind_rpb(include_type=True)def passing(self, kind=''):", "body": "doc = self.get_doc()table = (doc('') if kind == '' elsedoc(''))df = sportsref.utils.parse_table(table)return df", "docstring": "Gets yearly passing stats for the player.\n\n :kind: One of 'R', 'P', or 'B'. 
Case-insensitive; defaults to 'R'.\n :returns: Pandas DataFrame with passing stats.", "id": "f8188:c0:m21"} {"signature": "@decorators.switch_to_dir(os.path.dirname(os.path.realpath(__file__)))def inputs_options_defaults():", "body": "if os.path.isfile(PSF_CONSTANTS_FILENAME):modtime = int(os.path.getmtime(PSF_CONSTANTS_FILENAME))curtime = int(time.time())if (os.path.isfile(PSF_CONSTANTS_FILENAME)and curtime - modtime <= * * * ):with open(PSF_CONSTANTS_FILENAME, '') as const_f:def_dict = json.load(const_f)else:print('')html = utils.get_html(PSF_URL)doc = pq(html)def_dict = {}for inp in doc(''):name = inp.attrib['']if name not in def_dict:def_dict[name] = {'': set(),'': set(),'': inp.attrib['']}if inp.attrib[''] in ('', ''):if '' in inp.attrib:def_dict[name][''].add(inp.attrib[''])def_dict[name][''].add(inp.attrib[''])else:def_dict[name][''].add(inp.attrib.get('', ''))for sel in doc.items(''):name = sel.attr['']if name not in def_dict:def_dict[name] = {'': set(),'': set(),'': ''}defaultOpt = sel('')if len(defaultOpt):defaultOpt = defaultOpt[]def_dict[name][''].add(defaultOpt.attrib.get('', ''))else:def_dict[name][''].add(sel('')[].attrib.get('', ''))def_dict[name][''] = {opt.attrib[''] for opt in sel('')if opt.attrib.get('')}def_dict.pop('', None)def_dict.pop('', None)with open(PSF_CONSTANTS_FILENAME, '') as f:for k in def_dict:try:def_dict[k][''] = sorted(list(def_dict[k]['']), key=int)def_dict[k][''] = sorted(list(def_dict[k]['']), key=int)except Exception:def_dict[k][''] = sorted(list(def_dict[k]['']))def_dict[k][''] = sorted(list(def_dict[k]['']))json.dump(def_dict, f)return def_dict", "docstring": "Handles scraping options for player-season finder form.\n\n :returns: {'name1': {'value': val, 'options': [opt1, ...] }, ... }", "id": "f8189:m2"} {"signature": "def _kwargs_to_qs(**kwargs):", "body": "inpOptDef = inputs_options_defaults()opts = {name: dct['']for name, dct in inpOptDef.items()}for k, v in kwargs.items():if k.lower() in ('', ''):del kwargs[k]kwargs[''] = vif k == '':if v.startswith(''):kwargs[k] = utils.rel_url_to_id(v)if isinstance(v, bool):kwargs[k] = '' if v else ''if k.lower() in ('', ''):del kwargs[k]kwargs[''] = vif k.lower() in ('', ''):del kwargs[k]if k.lower() == '':kwargs[''] = int(v)else:kwargs[''] = int(v)if k.lower() in ('', ''):del kwargs[k]if k.lower() == '':kwargs[''] = int(v)else:kwargs[''] = int(v)if k.lower() in ('', '', '', ''):del kwargs[k]if isinstance(v, collections.Iterable):lst = list(v)kwargs[''] = min(lst)kwargs[''] = max(lst)elif isinstance(v, basestring):v = list(map(int, v.split('')))kwargs[''] = min(v)kwargs[''] = max(v)else:kwargs[''] = vkwargs[''] = vif k.lower() in ('', '', '', ''):del kwargs[k]if isinstance(v, collections.Iterable):lst = list(v)kwargs[''] = min(lst)kwargs[''] = max(lst)elif isinstance(v, basestring):v = list(map(int, v.split('')))kwargs[''] = min(v)kwargs[''] = max(v)else:kwargs[''] = vkwargs[''] = vif k == '':kwargs[''] = ''if isinstance(v, basestring):v = v.split('')if not isinstance(v, collections.Iterable):v = [v]for k in kwargs:if k in opts:opts[k] = []for k, v in kwargs.items():if k in opts:if isinstance(v, basestring):v = v.split('')elif not isinstance(v, collections.Iterable):v = [v]for val in v:opts[k].append(val)opts[''] = []qs = ''.join(''.format(name, val)for name, vals in sorted(opts.items()) for val in vals)return qs", "docstring": "Converts kwargs given to GPF to a querystring.\n\n :returns: the querystring.", "id": "f8190:m1"} {"signature": "@sportsref.decorators.memoizedef week(self):", "body": "doc = 
self.get_doc()raw = doc('').attr['']match = re.match(r''.format(self.season()), raw)if match:return int(match.group())else:return ", "docstring": "Returns the week in which this game took place. 18 is WC round, 19\n is Div round, 20 is CC round, 21 is SB.\n :returns: Integer from 1 to 21.", "id": "f8193:c0:m14"} {"signature": "@sportsref.decorators.memoizedef snap_counts(self):", "body": "doc = self.get_doc()table_ids = ('', '')tms = (self.away(), self.home())df = pd.concat([sportsref.utils.parse_table(doc(''.format(table_id))).assign(is_home=bool(i), team=tms[i], opp=tms[i*-+])for i, table_id in enumerate(table_ids)])if df.empty:return dfreturn df.set_index('')", "docstring": "Gets the snap counts for both teams' players and returns them in a\n DataFrame. Note: only goes back to 2012.\n\n :returns: DataFrame of snap count data", "id": "f8193:c0:m25"} {"signature": "@sportsref.decorators.memoizedef ref_info(self):", "body": "doc = self.get_doc()table = doc('')return sportsref.utils.parse_info_table(table)", "docstring": "Gets a dictionary of ref positions and the ref IDs of the refs for\n that game.\n\n :returns: A dictionary of ref positions and IDs.", "id": "f8193:c0:m23"} {"signature": "@sportsref.decorators.memoizedef coin_toss(self):", "body": "doc = self.get_doc()table = doc('')giTable = sportsref.utils.parse_info_table(table)if '' in giTable:passelse:return None", "docstring": "Gets information relating to the opening coin toss.\n\n Keys are:\n * wonToss - contains the ID of the team that won the toss\n * deferred - bool whether the team that won the toss deferred it\n\n :returns: Dictionary of coin toss-related info.", "id": "f8193:c0:m20"} {"signature": "@sportsref.decorators.memoizedef weekday(self):", "body": "days = ['', '', '', '', '','', '']date = self.date()wd = date.weekday()return days[wd]", "docstring": "Returns the day of the week on which the game occurred.\n :returns: String representation of the day of the week for the game.", "id": "f8193:c0:m8"} {"signature": "@sportsref.decorators.memoizedef away(self):", "body": "doc = self.get_doc()table = doc('')relURL = table('').eq()('').eq().attr['']away = sportsref.utils.rel_url_to_id(relURL)return away", "docstring": "Returns away team ID.\n :returns: 3-character string representing away team's ID.", "id": "f8193:c0:m10"} {"signature": "@sportsref.decorators.memoizedef opp_stats(self, year):", "body": "doc = self.get_year_doc(year)table = doc('')df = sportsref.utils.parse_table(table)return df.loc[df.player_id == ''].iloc[]", "docstring": "Returns a Series (dict-like) of the team's opponent's stats from the\n team-season page.\n\n :year: Int representing the season.\n :returns: A Series of team stats.", "id": "f8195:c0:m24"} {"signature": "@sportsref.decorators.memoizedef team_stats(self, year):", "body": "doc = self.get_year_doc(year)table = doc('')df = sportsref.utils.parse_table(table)if df.empty:return pd.Series()return df.loc[df.player_id == ''].iloc[]", "docstring": "Returns a Series (dict-like) of team stats from the team-season\n page.\n\n :year: Int representing the season.\n :returns: A Series of team stats.", "id": "f8195:c0:m23"} {"signature": "@sportsref.decorators.memoizedef team_ids(year):", "body": "names = team_names(year)return {v: k for k, v in names.items()}", "docstring": "Returns a mapping from team name to team ID for a given season. Inverse\n mapping of team_names. 
Example of a full team name: \"New England Patriots\"\n\n :year: The year of the season in question (as an int).\n :returns: A dictionary with full team name keys and teamID values.", "id": "f8195:m1"} {"signature": "@sportsref.decorators.memoizedef stadium(self, year):", "body": "anchor = self._year_info_pq(year, '')('')return sportsref.utils.rel_url_to_id(anchor.attr[''])", "docstring": "Returns the ID for the stadium in which the team played in a given\n year.\n\n :year: The year in question.\n :returns: A string representing the stadium ID.", "id": "f8195:c0:m20"} {"signature": "@sportsref.decorators.memoizedef schedule(self, year):", "body": "doc = self.get_year_doc(year)table = doc('')df = sportsref.utils.parse_table(table)if df.empty:return pd.DataFrame()df = df.loc[df[''].notnull()]df[''] = np.arange(len(df)) + df[''] = df[''] == ''df[''] = df[''] == ''df[''] = df[''] == ''df[''] = df[''].isnull()df[''] = df[''].notnull()return df", "docstring": "Returns a DataFrame with schedule information for the given year.\n\n :year: The year for the season in question.\n :returns: Pandas DataFrame with schedule information.", "id": "f8195:c0:m15"} {"signature": "@sportsref.decorators.memoizedef boxscores(self, year):", "body": "doc = self.get_year_doc(year)table = doc('')df = sportsref.utils.parse_table(table)if df.empty:return np.array([])return df.boxscore_id.values", "docstring": "Gets list of BoxScore objects corresponding to the box scores from\n that year.\n\n :year: The year for which we want the boxscores; defaults to current\n year.\n :returns: np.array of strings representing boxscore IDs.", "id": "f8195:c0:m11"} {"signature": "@sportsref.decorators.memoizedef def_splits(self, year):", "body": "doc = self.get_year_doc(''.format(year))tables = doc('')dfs = [sportsref.utils.parse_table(table) for table in tables.items()]dfs = [df.assign(split=df.columns[]).rename(columns={df.columns[]: ''})for df in dfs]if not dfs:return pd.DataFrame()return pd.concat(dfs).reset_index(drop=True)", "docstring": "Returns a DataFrame of defensive team splits (i.e. 
opponent splits)\n for a season.\n\n :year: int representing the season.\n :returns: Pandas DataFrame of split data.", "id": "f8195:c0:m28"} {"signature": "@sportsref.decorators.cachedef get_html(url):", "body": "global last_request_timewith throttle_process_lock:with throttle_thread_lock:wait_left = THROTTLE_DELAY - (time.time() - last_request_time.value)if wait_left > :time.sleep(wait_left)response = requests.get(url)last_request_time.value = time.time()if <= response.status_code < :raise ValueError(''.format(response.status_code, url))html = response.texthtml = html.replace('', '').replace('', '')return html", "docstring": "Gets the HTML for the given URL using a GET request.\n\n :url: the absolute URL of the desired page.\n :returns: a string of HTML.", "id": "f8197:m0"} {"signature": "def memoize(fun):", "body": "@funcutils.wraps(fun)def wrapper(*args, **kwargs):do_memoization = sportsref.get_option('')if not do_memoization:return fun(*args, **kwargs)hash_args = tuple(args)hash_kwargs = frozenset(sorted(kwargs.items()))key = (hash_args, hash_kwargs)def _copy(v):if isinstance(v, pq):return v.clone()else:return copy.deepcopy(v)try:ret = _copy(cache[key])return retexcept KeyError:cache[key] = fun(*args, **kwargs)ret = _copy(cache[key])return retexcept TypeError:print(''.format(fun.__name__, key))raisecache = {}return wrapper", "docstring": "A decorator for memoizing functions.\n\n Only works on functions that take simple arguments - arguments that take\n list-like or dict-like arguments will not be memoized, and this function\n will raise a TypeError.", "id": "f8198:m6"} {"signature": "def _read_ready(self):", "body": "try:data = os.read(self._fileno, self.max_size)except InterruptedError:passexcept OSError as exc:self._fatal_error(exc, \"\")else:if data:self._protocol.data_received(data)else:if self._loop.get_debug():logger.info(\"\", self)self._closing = Falseself.pause_reading()self._loop.call_soon(self._protocol.eof_received)self._loop.call_soon(self._call_connection_lost, None)", "docstring": "Called by the event loop whenever the fd is ready for reading.", "id": "f8208:c0:m2"} {"signature": "@asyncio.coroutinedef stream_from_fd(fd, loop):", "body": "reader = asyncio.StreamReader(loop=loop)protocol = asyncio.StreamReaderProtocol(reader, loop=loop)waiter = asyncio.futures.Future(loop=loop)transport = UnixFileDescriptorTransport(loop=loop,fileno=fd,protocol=protocol,waiter=waiter,)try:yield from waiterexcept Exception:transport.close()if loop.get_debug():logger.debug(\"\", fd, transport, protocol)return reader, transport", "docstring": "Receive a streamer for a given file descriptor.", "id": "f8208:m0"} {"signature": "def pause_reading(self):", "body": "self._loop.remove_reader(self._fileno)self._active = False", "docstring": "Public API: pause reading the transport.", "id": "f8208:c0:m3"} {"signature": "def close(self):", "body": "if not self._closing:self._close()", "docstring": "Public API: close the transport.", "id": "f8208:c0:m5"} {"signature": "def setUp(self):", "body": "PyFunceble.INTERN[\"\"] = \"\"self.file = (PyFunceble.CURRENT_DIRECTORY+ PyFunceble.OUTPUTS[\"\"][\"\"])self.expected_content = {PyFunceble.INTERN[\"\"]: {\"\": {\"\": \"\",\"\": \"\",\"\": \"\",},\"\": {\"\": \"\",\"\": \"\",\"\": \"\",},}}", "docstring": "Setup everything needed for the test", "id": "f8209:c1:m0"} {"signature": "def setUp(self):", "body": "self.data_list = 
[\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",]self.data_list_url = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",]load_config(True)", "docstring": "Setup everything needed for the tests.", "id": "f8211:c0:m0"} {"signature": "def setUp(self):", "body": "PyFunceble.load_config(True)self.valid_domain = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",]self.not_valid_domain = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",]", "docstring": "Setup what we need for the tests.", "id": "f8212:c0:m0"} {"signature": "def setUp(self):", "body": "self.to_test = {\"\": \"\",\"\": {\"\", \"\"},\"\": [\"\", \"\"],\"\": \"\",\"\": [\"\"],}", "docstring": "Setup everything needed for the tests.", "id": "f8221:c3:m0"} {"signature": "def setUp(self):", "body": "PyFunceble.initiate(True)PyFunceble.load_config(True)BaseStdout.setUp(self)logo =", "docstring": "Setup everything needed.", "id": "f8222:c1:m0"} {"signature": "def setUp(self):", "body": "PyFunceble.load_config(True)self.domains = [\"\",\"\",\"\",\"\",\"\",\"\",]", "docstring": "Setup everything that is needed for the tests.", "id": "f8222:c2:m0"} {"signature": "def setUp(self):", "body": "PyFunceble.load_config(True)self.file = \"\"self.to_print = {\"\": {\"\": , \"\": , \"\": , \"\": , \"\": },\"\": [, , , , , ],\"\": \"\",\"\": {\"\": , \"\": },}", "docstring": "Setup everything needed for the tests.", "id": "f8224:c0:m0"} {"signature": "def load(self):", "body": "if \"\" not in PyFunceble.INTERN or not PyFunceble.INTERN[\"\"]:PyFunceble.INTERN[\"\"] = self.iana_db", "docstring": "Initiate the IANA database if it is not the case.", "id": "f8225:c0:m1"} {"signature": "@classmethoddef _install_directory_structure_file(cls):", "body": "dir_structure_link = PyFunceble.CONFIGURATION[\"\"][\"\"]dir_structure_link = Version(True).right_url_from_version(dir_structure_link)destination = (PyFunceble.CURRENT_DIRECTORY+ PyFunceble.CONFIGURATION[\"\"][\"\"][\"\"])if not Version(True).is_cloned() or not PyFunceble.path.isfile(destination):data = Download(dir_structure_link, destination, return_data=True).text()File(destination).write(data, overwrite=True)return Truereturn None", "docstring": "Download the latest version of `dir_structure_production.json`.", "id": "f8227:c0:m6"} {"signature": "@classmethoddef check_versions_literally(cls, local, upstream):", "body": "return local == upstream", "docstring": "Compare the given versions literally.\n\n:param local: The local version converted by split_versions().\n:type local: str\n\n:param upstream: The upstream version converted by split_versions().\n:type upstream: str\n\n:return:\n - True: local == upstream\n - False: local != upstream\n:rtype: bool", "id": "f8227:c2:m2"} {"signature": "@classmethoddef is_cloned(cls):", "body": "if not PyFunceble.path.isdir(\"\"):return Falselist_of_file = [\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",]list_of_dir = [\"\", \"\", \"\"]for file in list_of_file:if not PyFunceble.path.isfile(file):return Falsefor directory in list_of_dir:if not PyFunceble.path.isdir(directory):return Falsereturn True", "docstring": "Let 
us know if we are currently in the cloned version of\nPyFunceble which implicitly mean that we are in developement mode.", "id": "f8227:c2:m5"} {"signature": "@classmethoddef right_url_from_version(cls, url):", "body": "if \"\" in PyFunceble.VERSION:return url.replace(\"\", \"\")return url.replace(\"\", \"\")", "docstring": "Convert the GitHub URL to the right one depending of the\nbranch or version we are working with.\n\n:param url: The URL to convert.\n:type url: str\n\n:return: The converted URL.\n:rtype: str", "id": "f8227:c2:m6"} {"signature": "def compare(self):", "body": "if self.upstream_data[\"\"][\"\"]:for minimal in self.upstream_data[\"\"][\"\"]:checked = self.check_versions(self.local_splited, self.split_versions(minimal))if not PyFunceble.CONFIGURATION[\"\"]:if checked or checked is not False and not checked:message = (PyFunceble.Style.BRIGHT+ PyFunceble.Fore.RED+ \"\"+ PyFunceble.Style.RESET_ALL) message += (PyFunceble.Style.BRIGHT+ PyFunceble.Fore.GREEN+ \"\"+ PyFunceble.Style.RESET_ALL) print(message)exit()elif checked or checked is not False and not checked:raise Exception(\"\" )for version in self.upstream_data[\"\"]:checked = self.check_versions(self.local_splited, self.split_versions(version))if (not PyFunceble.CONFIGURATION[\"\"]and checkedor checked is not Falseand not checked):message = (PyFunceble.Style.BRIGHT+ PyFunceble.Fore.RED+ \"\"+ PyFunceble.Style.RESET_ALL) message += (PyFunceble.Style.BRIGHT+ PyFunceble.Fore.GREEN+ \"\"+ PyFunceble.Style.RESET_ALL) print(message)returnif checked or checked is not False and not checked:print(\"\")returnstatus = self.check_versions(self.local_splited,self.split_versions(self.upstream_data[\"\"]),)if status is not None and not status and not PyFunceble.CONFIGURATION[\"\"]:message = (PyFunceble.Style.BRIGHT+ PyFunceble.Fore.CYAN+ \"\" + PyFunceble.Style.RESET_ALL)message += (PyFunceble.Style.BRIGHT+ \"\"+ PyFunceble.Style.RESET_ALL+ PyFunceble.VERSION+ \"\")message += (PyFunceble.Style.BRIGHT+ \"\"+ PyFunceble.Style.RESET_ALL+ self.upstream_data[\"\"]+ \"\")print(message)elif status:if not PyFunceble.CONFIGURATION[\"\"]:message = (PyFunceble.Style.BRIGHT+ PyFunceble.Fore.YELLOW+ \"\"+ PyFunceble.Style.RESET_ALL) message += (PyFunceble.Style.BRIGHT+ \"\"+ PyFunceble.Style.RESET_ALL+ PyFunceble.VERSION+ \"\") message += (PyFunceble.Style.BRIGHT+ \"\"+ PyFunceble.Style.RESET_ALL+ self.upstream_data[ \"\"]+ \"\")print(message)else:print(\"\")return", "docstring": "Compare the current version with the upstream saved version.", "id": "f8227:c2:m4"} {"signature": "def header(self, do_not_print=False): ", "body": "if (not PyFunceble.CONFIGURATION[\"\"]or self.template == \"\"or do_not_print):if (self.template.lower() in PyFunceble.STATUS[\"\"][\"\"]or self.template == \"\"):to_print = self.headers[\"\"]if (self.template.lower() in PyFunceble.STATUS[\"\"][\"\"]and PyFunceble.HTTP_CODE[\"\"]):to_print = Dict(to_print).remove_key(\"\")elif self.template.lower() in PyFunceble.STATUS[\"\"][\"\"]:to_print = self.headers[PyFunceble.STATUS[\"\"][\"\"]]elif self.template.lower() in PyFunceble.STATUS[\"\"][\"\"]:to_print = self.headers[PyFunceble.STATUS[\"\"][\"\"]]elif self.template.lower() in PyFunceble.STATUS[\"\"][\"\"]:to_print = self.headers[PyFunceble.STATUS[\"\"][\"\"]]elif self.template.lower() in PyFunceble.STATUS[\"\"][\"\"]:to_print = self.headers[PyFunceble.STATUS[\"\"][\"\"]]elif (self.template == \"\"or self.template == \"\"or self.template == \"\"): to_print = self.headers[self.template]if self.template == \"\" and not 
PyFunceble.HTTP_CODE[\"\"]:to_print[\"\"] = if not PyFunceble.HTTP_CODE[\"\"]:to_print = Dict(to_print).remove_key(\"\")self.currently_used_header = to_printif not do_not_print:self._before_header()for formatted_template in self._header_constructor(to_print):if not self.only_on_file:print(formatted_template)if not PyFunceble.CONFIGURATION[\"\"] and self.output:File(self.output).write(formatted_template + \"\")", "docstring": "Management and creation of templates of header.\nPlease consider as \"header\" the title of each columns.\n\n:param do_not_print:\n Tell us if we have to print the header or not.\n:type do_not_print: bool", "id": "f8228:c0:m3"} {"signature": "def data(self): ", "body": "if isinstance(self.data_to_print, list):to_print = {}to_print_size = []alone_cases = [\"\", \"\"]without_header = [\"\", \"\"]if self.template.lower() == \"\":if not PyFunceble.CONFIGURATION[\"\"] and self.output:return self._json_print()return Noneif self.template not in alone_cases and self.template not in without_header:self.header(True)to_print_size = self._size_from_header(self.currently_used_header)elif self.template in without_header:for data in self.data_to_print:to_print_size.append(str(len(data)))else:to_print_size = self._size_from_header(self.headers[self.template])to_print = self._data_constructor(to_print_size)self._before_header()for data in self._header_constructor(to_print, False):if self.template.lower() in PyFunceble.STATUS[\"\"][\"\"] or self.template in [\"\", \"\"]:if not self.only_on_file:colorified_data = self._colorify(data)print(colorified_data)if not PyFunceble.CONFIGURATION[\"\"] and self.output:File(self.output).write(data + \"\")else:raise Exception(\"\")", "docstring": "Management and input of data to the table.\n\n:raises:\n :code:`Exception`\n When self.data_to_print is not a list.", "id": "f8228:c0:m8"} {"signature": "@classmethoddef file_to_delete(cls):", "body": "directory = PyFunceble.OUTPUT_DIRECTORY + PyFunceble.OUTPUTS[\"\"]if not directory.endswith(PyFunceble.directory_separator): directory += PyFunceble.directory_separatorresult = []for root, _, files in PyFunceble.walk(directory):for file in files:if file not in [\"\", \"\"]:if root.endswith(PyFunceble.directory_separator):result.append(root + file)else:result.append(root + PyFunceble.directory_separator + file) return result", "docstring": "Return the list of file to delete.", "id": "f8229:c0:m1"} {"signature": "@classmethoddef databases_to_delete(cls): ", "body": "directory = PyFunceble.CURRENT_DIRECTORYresult = []result.append(directory+ PyFunceble.CONFIGURATION[\"\"][\"\"][\"\"])result.append(directory + PyFunceble.CONFIGURATION[\"\"][\"\"][\"\"])result.append(directory+ PyFunceble.CONFIGURATION[\"\"][\"\"][\"\"])result.append(directory+ PyFunceble.CONFIGURATION[\"\"][\"\"][\"\"])result.append(directory + PyFunceble.CONFIGURATION[\"\"][\"\"][\"\"])result.append(directory + PyFunceble.CONFIGURATION[\"\"][\"\"][\"\"])return result", "docstring": "Set the databases files to delete.", "id": "f8229:c0:m2"} {"signature": "@classmethoddef _calculate(cls):", "body": "percentages = {\"\": PyFunceble.INTERN[\"\"][\"\"][\"\"],\"\": PyFunceble.INTERN[\"\"][\"\"][\"\"],\"\": PyFunceble.INTERN[\"\"][\"\"][\"\"],}for percentage in percentages:calculation = (percentages[percentage]* // PyFunceble.INTERN[\"\"][\"\"][\"\"])PyFunceble.INTERN[\"\"][\"\"].update({percentage: calculation})", "docstring": "Calculate the percentage of each status.", "id": "f8230:c0:m2"} {"signature": "@classmethoddef travis_permissions(cls):", 
"body": "if PyFunceble.CONFIGURATION[\"\"]:try:build_dir = PyFunceble.environ[\"\"]commands = [\"\" % (build_dir),\"\" % (build_dir),\"\" % (build_dir),\"\"% (build_dir + PyFunceble.directory_separator),r\"\" % (build_dir),]for command in commands:Command(command).execute()if Command(\"\").execute() == \"\":Command(\"\").execute()except KeyError:pass", "docstring": "Set permissions in order to avoid issues before commiting.", "id": "f8231:c0:m1"} {"signature": "def _travis(self):", "body": "if PyFunceble.CONFIGURATION[\"\"]:try:_ = PyFunceble.environ[\"\"]time_autorisation = Falsetry:time_autorisation = int(PyFunceble.time()) >= int(PyFunceble.INTERN[\"\"]) + (int(PyFunceble.CONFIGURATION[\"\"]) * )except KeyError:if self.last and not self.bypass:raise Exception(\"\")if self.last or time_autorisation or self.bypass:Percentage().log()self.travis_permissions()command = ''if self.last or self.bypass:if PyFunceble.CONFIGURATION[\"\"]:for line in Command(PyFunceble.CONFIGURATION[\"\"]).run():sys_stdout.write(\"\".format(line))self.travis_permissions()message = (PyFunceble.CONFIGURATION[\"\"]+ \"\")Command(command % message).execute()else:if PyFunceble.CONFIGURATION[\"\"]:for line in Command(PyFunceble.CONFIGURATION[\"\"]).run():sys_stdout.write(\"\".format(line))self.travis_permissions()Command(command % PyFunceble.CONFIGURATION[\"\"]).execute()print(Command(\"\"% PyFunceble.CONFIGURATION[\"\"]).execute())exit()except KeyError:pass", "docstring": "Logic behind autosave under Travis CI.", "id": "f8231:c0:m2"} {"signature": "def analytic_file(self, new_status, old_status=None):", "body": "if not old_status:old_status = self.domain_statusif \"\" in PyFunceble.INTERN and PyFunceble.INTERN[\"\"]:output = (self.output_parent_dir+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ \"\")if new_status.lower() in PyFunceble.STATUS[\"\"][\"\"]:output = output % (PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],)Generate(\"\").info_files()elif new_status.lower() in PyFunceble.STATUS[\"\"][\"\"]:output = output % (PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],)Generate(\"\").info_files()elif new_status.lower() in PyFunceble.STATUS[\"\"][\"\"]:output = output % (PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],)Generate(\"\").info_files()else:output = output % (PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],)Generate(\"\").info_files()Prints([self.tested,old_status,PyFunceble.INTERN[\"\"],PyFunceble.CURRENT_TIME,],\"\",output,True,).data()", "docstring": "Generate :code:`Analytic/*` files based on the given old and\nnew statuses.\n\n:param new_status: The new status of the domain.\n:type new_status: str\n\n:param old_status: The old status of the domain.\n:type old_status: str", "id": "f8232:c0:m5"} {"signature": "def _prints_status_file(self): ", "body": "if PyFunceble.INTERN[\"\"]:output = (self.output_parent_dir+ PyFunceble.OUTPUTS[\"\"][\"\"]+ self.domain_status)if PyFunceble.CONFIGURATION[\"\"]:Prints([self.tested, self.domain_status, self.source], \"\", output, True).data()elif PyFunceble.CONFIGURATION[\"\"]:if self.domain_status.lower() in PyFunceble.STATUS[\"\"][\"\"]:if PyFunceble.HTTP_CODE[\"\"]:data_to_print = [self.tested,self.expiration_date,self.source,PyFunceble.INTERN[\"\"],PyFunceble.CURRENT_TIME,]else:data_to_print = [self.tested,self.expiration_date,self.source,PyFunceble.CURRENT_TIME,]Prints(data_to_print, PyFunceble.STATUS[\"\"][\"\"], output, True).data()elif self.domain_status.lower() in 
PyFunceble.STATUS[\"\"][\"\"]:data_to_print = [self.tested, self.source, PyFunceble.CURRENT_TIME]Prints(data_to_print,PyFunceble.STATUS[\"\"][\"\"],output,True,).data()elif self.domain_status.lower() in PyFunceble.STATUS[\"\"][\"\"]:if PyFunceble.HTTP_CODE[\"\"]:data_to_print = [self.tested,PyFunceble.INTERN[\"\"],self.domain_status,self.source,PyFunceble.INTERN[\"\"],PyFunceble.CURRENT_TIME,]else:data_to_print = [self.tested,PyFunceble.INTERN[\"\"],self.domain_status,self.source,PyFunceble.CURRENT_TIME,]Prints(data_to_print,PyFunceble.STATUS[\"\"][\"\"],output,True,).data()elif self.domain_status.lower() in PyFunceble.STATUS[\"\"][\"\"]:if PyFunceble.HTTP_CODE[\"\"]:data_to_print = [self.tested,self.source,PyFunceble.INTERN[\"\"],PyFunceble.CURRENT_TIME,]else:data_to_print = [self.tested,self.source,PyFunceble.CURRENT_TIME,]Prints(data_to_print,PyFunceble.STATUS[\"\"][\"\"],output,True,).data()", "docstring": "Logic behind the printing (in file) when generating status file.", "id": "f8232:c0:m6"} {"signature": "def status_file(self): ", "body": "if \"\" in PyFunceble.INTERN:Generate(self.domain_status, self.source, self.expiration_date).info_files()Percentage(self.domain_status).count()self._prints_status_screen()if self._do_not_produce_file():return Noneif (not PyFunceble.CONFIGURATION[\"\"]and PyFunceble.CONFIGURATION[\"\"]):self._prints_status_file()else:self.unified_file()", "docstring": "Generate a file according to the domain status.", "id": "f8232:c0:m8"} {"signature": "def _extract_base(self, element):", "body": "if isinstance(element, list):return [self._extract_base(x) for x in element]base = self.checker.is_url_valid(url=element, return_base=True)if base:return baseif \"\" in element:return element.split(\"\")[]return element", "docstring": "Extract the base of the given element.\n\n.. 
example:\n given \"hello/world?world=beautiful\" return \"hello\"\n\n:param element: The element we are working with.\n:type element: str|list", "id": "f8233:c0:m4"} {"signature": "def _format_decoded(self, to_format, result=None): ", "body": "if not result:result = []for data in List(to_format).format():if data:if \"\" in data:return self._format_decoded(data.split(\"\"), result)if \"\" in data:return self._format_decoded(data.split(\"\"), result)if \"\" in data:return self._format_decoded(data.split(\"\"), result)if \"\" in data:return self._format_decoded(data.split(\"\"), result)if \"\" in data:return self._format_decoded(data.split(\"\"), result)if data:data = self._extract_base(data)if data and (self.checker.is_domain_valid(data)or self.checker.is_ip_valid(data)):result.append(data)elif data:url_base = self.checker.is_url_valid(data, return_base=True)if url_base:result.append(url_base)return result", "docstring": "Format the exctracted adblock line before passing it to the system.\n\n:param to_format: The extracted line from the file.\n:type to_format: str\n\n:param result: A list of the result of this method.\n:type result: list\n\n:return: The list of domains or IP to test.\n:rtype: list", "id": "f8233:c0:m6"} {"signature": "def _handle_options(self, options):", "body": "result = []regex_domain_option = r\"\"for option in options:try:domains = Regex(option, regex_domain_option, return_data=True, rematch=True, group=).match()[-]if domains:if self.aggressive: result.extend([xfor x in domains.split(\"\")if x and not x.startswith(\"\")])else:return Trueexcept TypeError:passreturn result", "docstring": "Handle the data from the options.\n\n:param options: The list of options from the rule.\n:type options: list\n\n:return: The list of domains to return globally.\n:rtype: list", "id": "f8233:c0:m3"} {"signature": "def _remove_ignored(self, list_from_file):", "body": "return [x for x in list_from_file if not self._is_to_ignore(x)]", "docstring": "Removed the ignored element from the given list.\n\n:param list_from_file:\n The list which represent the file we are decoding.\n:type list_from_list: list\n\n:return: The filtered list.\n:rtype: list", "id": "f8233:c0:m1"} {"signature": "@classmethoddef _is_to_ignore(cls, line):", "body": "to_ignore = [r\"\"] for element in to_ignore:if Regex(line, element, return_data=False).match():return Truereturn False", "docstring": "Check if we have to ignore the given line.\n\n:param line: The line from the file.\n:type line: str", "id": "f8233:c0:m2"} {"signature": "def _update_structure_from_config(self, structure):", "body": "to_replace_base_map = {\"\": PyFunceble.OUTPUTS[\"\"]}to_replace_map = {\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ 
PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"]+ PyFunceble.STATUS[\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]+ PyFunceble.OUTPUTS[\"\"][\"\"][\"\"],\"\": PyFunceble.OUTPUTS[\"\"][\"\"],}to_replace = {}for mapped, declared in to_replace_map.items():declared = Directory(declared).fix_path()to_replace.update({mapped: declared})to_replace_base = {}for mapped, declared in to_replace_base_map.items():declared = Directory(declared).fix_path()to_replace_base.update({mapped: declared})structure = Dict(structure).rename_key(to_replace_base)structure[PyFunceble.OUTPUTS[\"\"]] = Dict(structure[PyFunceble.OUTPUTS[\"\"]]).rename_key(to_replace)try:Dict(structure).to_json(self.structure)except FileNotFoundError:PyFunceble.mkdir(PyFunceble.directory_separator.join(self.structure.split(PyFunceble.directory_separator)[:-]))Dict(structure).to_json(self.structure)return structure", "docstring": "Update the paths according to configs.\n\n:param structure: The read structure.\n:type structure: dict", "id": "f8235:c0:m3"} {"signature": "def restore(self):", "body": "structure = self._get_structure()list_of_key = list(structure.keys())structure = structure[list_of_key[]]parent_path = list_of_key[]if not parent_path.endswith(PyFunceble.directory_separator):parent_path += PyFunceble.directory_separatorreplacement_status = self._restore_replace()for directory in structure:base = self.base + parent_path + directoryif not base.endswith(PyFunceble.directory_separator):base += PyFunceble.directory_separatorself._create_directory(base)for file in structure[directory]:file_path = base + filecontent_to_write = structure[directory][file][\"\"]online_sha = structure[directory][file][\"\"]content_to_write = Regex(content_to_write, \"\", escape=True, replace_with=\"\").replace()git_to_keep = file_path.replace(\"\", \"\")keep_to_git = file_path.replace(\"\", \"\")if replacement_status:if (PyFunceble.path.isfile(file_path)and Hash(file_path, \"\", True).get() == online_sha):PyFunceble.rename(file_path, git_to_keep)write = Falseelse:File(file_path).delete()file_path = git_to_keepwrite = Trueelse:if (PyFunceble.path.isfile(keep_to_git)and Hash(file_path, \"\", True).get() == online_sha):PyFunceble.rename(file_path, keep_to_git)write = Falseelse:File(keep_to_git).delete()file_path = keep_to_gitwrite = Trueif write:File(file_path).write(content_to_write + \"\", True)self.delete_uneeded()", "docstring": "Restore the 'output/' directory structure based on the `dir_structure.json` file.", "id": "f8235:c0:m6"} {"signature": "@classmethoddef _create_directory(cls, directory, loop=False):", "body": "if not loop and PyFunceble.directory_separator in directory:splited_directory = directory.split(PyFunceble.directory_separator)full_path_to_create = \"\"for single_directory in splited_directory:full_path_to_create += single_directory + 
PyFunceble.directory_separatorcls._create_directory(full_path_to_create, True)if not PyFunceble.path.isdir(directory):AutoSave.travis_permissions()PyFunceble.mkdir(directory)AutoSave.travis_permissions()", "docstring": "Creates the given directory if it does not exists.\n\n:param directory: The directory to create.\n:type directory: str\n\n:param loop: Tell us if we are in the creation loop or not.\n:type loop: bool", "id": "f8235:c0:m5"} {"signature": "@classmethoddef nslookup(cls):", "body": "try:if \"\" in PyFunceble.INTERN: if not Check().is_ip_valid():request = PyFunceble.socket.getaddrinfo(PyFunceble.INTERN[\"\"],,,,PyFunceble.socket.IPPROTO_TCP,)for sequence in request:PyFunceble.INTERN[\"\"][\"\"].append(sequence[-][])else:request = PyFunceble.socket.gethostbyaddr(PyFunceble.INTERN[\"\"])PyFunceble.INTERN[\"\"][\"\"][\"\"] = request[]PyFunceble.INTERN[\"\"][\"\"][\"\"] = request[]PyFunceble.INTERN[\"\"][\"\"][\"\"] = request[]else:if not Check().is_ip_valid():PyFunceble.socket.getaddrinfo(PyFunceble.INTERN[\"\"],,,,PyFunceble.socket.IPPROTO_TCP,)else:PyFunceble.socket.gethostbyaddr(PyFunceble.INTERN[\"\"])return Trueexcept (OSError, PyFunceble.socket.herror, PyFunceble.socket.gaierror):return False", "docstring": "Implementation of UNIX nslookup.", "id": "f8236:c0:m0"} {"signature": "def is_url_valid(self, url=None, return_base=False, return_formatted=False):", "body": "initial_base = Noneif url:to_test = urlelif self.element:to_test = self.elementelse:to_test = PyFunceble.INTERN[\"\"]if to_test.startswith(\"\"):try:regex = r\"\"initial_base = base = Regex(to_test, regex, return_data=True, rematch=True).match()[]if PyFunceble.CONFIGURATION[\"\"]:base = domain2idna(base)domain_status = self.is_domain_valid(base)ip_status = self.is_ip_valid(base)if domain_status or ip_status:if PyFunceble.CONFIGURATION[\"\"] and return_formatted:return Regex(to_test,initial_base,escape=True,return_data=True,replace_with=base,occurences=,).replace()if return_formatted:return to_testif return_base:return basereturn Trueexcept TypeError:passif return_formatted:return to_testreturn False", "docstring": "Check if the given URL is valid.\n\n:param url: The url to validate.\n:type url: str\n\n:param return_base:\n Allow us the return of the url base (if URL formatted correctly).\n:type return_formatted: bool\n\n:param return_formatted:\n Allow us to get the URL converted to IDNA if the conversion\n is activated.\n:type return_formatted: bool\n\n\n:return: The validity of the URL or its base.\n:rtype: bool|str", "id": "f8238:c0:m1"} {"signature": "def run(self):", "body": "with Popen(self.command, stdout=PIPE, shell=True) as process:while True:current_line = process.stdout.readline().rstrip()if not current_line:breakyield self._decode_output(current_line)", "docstring": "Run the given command and yield each line(s) one by one.\n\n.. 
note::\n The difference between this method and :code:`self.execute()`\n is that :code:`self.execute()` wait for the process to end\n in order to return its output.", "id": "f8240:c1:m3"} {"signature": "def to_json(self, destination):", "body": "try:with open(destination, \"\") as file:dump(self.main_dictionnary,file,ensure_ascii=False,indent=,sort_keys=True,)except UnicodeEncodeError: with open(destination, \"\", encoding=\"\") as file:dump(self.main_dictionnary,file,ensure_ascii=False,indent=,sort_keys=True,)", "docstring": "Save a dictionnary into a JSON file.\n\n:param destination:\n A path to a file where we're going to\n write the converted dict into a JSON format.\n:type destination: str", "id": "f8240:c2:m4"} {"signature": "def get(self):", "body": "result = {}if self.algorithm in self.valid_algorithms:if self.algorithm == \"\":del self.valid_algorithms[]for algo in self.valid_algorithms:if self.path and path.isfile(self.path):result[algo] = self._hash_file(algo)elif self.data:result[algo] = self._hash_data(algo)else: return Noneelse:if self.path and path.isfile(self.path):result[self.algorithm] = self._hash_file(self.algorithm)elif self.data:result[self.algorithm] = self._hash_data(self.algorithm)else:return Noneelse: return Noneif self.algorithm != \"\" and self.only_hash:return result[self.algorithm]return result", "docstring": "Return the hash of the given file", "id": "f8240:c0:m3"} {"signature": "@classmethoddef from_json(cls, data):", "body": "try:return loads(data)except decoder.JSONDecodeError: return {}", "docstring": "Convert a JSON formatted string into a dictionary.\n\n:param data: A JSON formatted string to convert to dict format.\n:type data: str\n\n:return: The dict representation of the JSON formatted string.\n:rtype: dict", "id": "f8240:c2:m6"} {"signature": "def not_matching_list(self):", "body": "pre_result = comp(self.regex)return [x for x in self.data if not pre_result.search(str(x))]", "docstring": "Return a list of string which don't match the\ngiven regex.", "id": "f8240:c6:m1"} {"signature": "def execute(self):", "body": "process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=True)(output, error) = process.communicate()if process.returncode != : return self._decode_output(error)return self._decode_output(output)", "docstring": "Execute the given command.\n\n:return: The output of the command.\n:rtype: str", "id": "f8240:c1:m2"} {"signature": "def referer_not_found(self, extension):", "body": "if PyFunceble.CONFIGURATION[\"\"]:to_write = {self.current_time: {\"\": PyFunceble.INTERN[\"\"],\"\": extension,}}if self.output:output = self.outputelse:output = PyFunceble.OUTPUT_DIRECTORYoutput += PyFunceble.OUTPUTS[\"\"]output += PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]output += PyFunceble.OUTPUTS[\"\"][\"\"][\"\"]current_content = self._get_content(output)current_content.update(to_write)self._write_content(current_content, output)if PyFunceble.CONFIGURATION[\"\"]:PyFunceble.requests.post(PyFunceble.LINKS[\"\"], data=to_write[self.current_time])", "docstring": "Logs the case that the referer was not found.\n\n:param extension: The extension of the domain we are testing.\n:type extension: str", "id": "f8241:c0:m5"} {"signature": "def restore(self):", "body": "if PyFunceble.CONFIGURATION[\"\"] and self.backup_content:file_to_restore = PyFunceble.INTERN[\"\"]if file_to_restore in self.backup_content:to_initiate = [\"\", \"\", \"\", \"\"]alternatives = {\"\": \"\",\"\": \"\",\"\": \"\",\"\": \"\",}for string in 
to_initiate:try:PyFunceble.INTERN[\"\"][\"\"].update({string: self.backup_content[file_to_restore][string]})except KeyError:PyFunceble.INTERN[\"\"][\"\"].update({string: self.backup_content[file_to_restore][alternatives[string]]})", "docstring": "Restore data from the given path.", "id": "f8242:c0:m2"} {"signature": "def backup(self):", "body": "if PyFunceble.CONFIGURATION[\"\"]:data_to_backup = {}configuration_counter = PyFunceble.INTERN[\"\"][\"\"]data_to_backup[PyFunceble.INTERN[\"\"]] = {\"\": configuration_counter[\"\"],\"\": configuration_counter[\"\"],\"\": configuration_counter[\"\"],\"\": configuration_counter[\"\"],}to_save = {}to_save.update(self.backup_content)to_save.update(data_to_backup)Dict(to_save).to_json(self.autocontinue_log_file)", "docstring": "Backup the current execution state.", "id": "f8242:c0:m1"} {"signature": "def _retrieve(self):", "body": "if PyFunceble.CONFIGURATION[\"\"]:if \"\" not in PyFunceble.INTERN:PyFunceble.INTERN[\"\"] = {}if PyFunceble.path.isfile(self.file):data = Dict().from_json(File(self.file).read())for file_path in data:PyFunceble.INTERN[\"\"][file_path] = {}for element in data[file_path]:if data[file_path][element]:PyFunceble.INTERN[\"\"][file_path][element] = data[file_path][element]returnPyFunceble.INTERN[\"\"] = {}return", "docstring": "Retrieve the mining informations.", "id": "f8243:c0:m2"} {"signature": "def file_url(self):", "body": "list_to_test = self._file_list_to_test_filtering()not_filtered = list_to_testtry:list_to_test = List(list(set(list_to_test[PyFunceble.INTERN[\"\"][\"\"][\"\"] :])- set(PyFunceble.INTERN[\"\"]))).format()_ = list_to_test[-]except IndexError:list_to_test = not_filtered[PyFunceble.INTERN[\"\"][\"\"][\"\"] :]del not_filteredif PyFunceble.CONFIGURATION[\"\"]:list_to_test = List(list(list_to_test)).custom_format(Sort.hierarchical)try:return [self.url(x, list_to_test[-]) for x in list_to_test if x]except IndexError:print(PyFunceble.Fore.CYAN + PyFunceble.Style.BRIGHT + \"\")", "docstring": "Manage the case that we have to test a file\n\n.. 
note::\n 1 URL per line.", "id": "f8244:c0:m17"} {"signature": "@classmethoddef _print_header(cls):", "body": "if (not PyFunceble.CONFIGURATION[\"\"]and not PyFunceble.CONFIGURATION[\"\"]):print(\"\")if PyFunceble.CONFIGURATION[\"\"]:Prints(None, \"\").header()else:Prints(None, \"\").header()PyFunceble.CONFIGURATION[\"\"] = True", "docstring": "Decide if we print or not the header.", "id": "f8244:c0:m7"} {"signature": "def _entry_management(self): ", "body": "if not self.modulo_test: PyFunceble.INTERN[\"\"] = self.file_path self._entry_management_url()AutoSave().travis_permissions()self.bypass()ExecutionTime(\"\")if PyFunceble.CONFIGURATION[\"\"]:PyFunceble.HTTP_CODE[\"\"] = Falseif self.domain_or_ip_to_test: PyFunceble.INTERN[\"\"] = \"\"ExecutionTime(\"\")PyFunceble.CONFIGURATION[\"\"] = FalsePyFunceble.CONFIGURATION[\"\"] = Falseif PyFunceble.CONFIGURATION[\"\"]:domain_or_ip_to_test = domain2idna(self.domain_or_ip_to_test.lower() )else:domain_or_ip_to_test = (self.domain_or_ip_to_test.lower() ) self.domain(domain_or_ip_to_test)elif self.url_to_test and not self.file_path: PyFunceble.INTERN[\"\"] = \"\"ExecutionTime(\"\")PyFunceble.CONFIGURATION[\"\"] = Falseself.url(self.checker.is_url_valid(self.url_to_test, return_formatted=True,))elif (self._entry_management_url_download(self.url_file )or self.url_file ):PyFunceble.CONFIGURATION[\"\"] = PyFunceble.CONFIGURATION[\"\"] = PyFunceble.CONFIGURATION[\"\"] = TruePyFunceble.CONFIGURATION[\"\"] = FalsePyFunceble.INTERN[\"\"] = \"\"self.file_url()elif (self._entry_management_url_download(self.link_to_test )or self._entry_management_url_download(self.file_path ) or self.file_path ):PyFunceble.INTERN[\"\"] = \"\"self.file()else:print(PyFunceble.Fore.CYAN + PyFunceble.Style.BRIGHT + \"\")if (self.domain_or_ip_to_test or self.url_to_test ):ExecutionTime(\"\", last=True)self.percentage.log()self.colorify_logo()PyFunceble.stay_safe()else:PyFunceble.CONFIGURATION[\"\"] = PyFunceble.CONFIGURATION[\"\"] = PyFunceble.CONFIGURATION[\"\"] = TruePyFunceble.CONFIGURATION[\"\"] = PyFunceble.CONFIGURATION[\"\"] = PyFunceble.CONFIGURATION[\"\"] = PyFunceble.CONFIGURATION[\"\"] = Falseif self.domain_or_ip_to_test: PyFunceble.INTERN[\"\"] = \"\"PyFunceble.INTERN[\"\"] = self.domain_or_ip_to_test.lower() elif self.url_to_test: PyFunceble.INTERN[\"\"] = \"\"PyFunceble.INTERN[\"\"] = self.url_to_test", "docstring": "Avoid to have 1 millions line into self.__init__()", "id": "f8244:c0:m3"} {"signature": "@classmethoddef colorify_logo(cls, home=False):", "body": "if not PyFunceble.CONFIGURATION[\"\"]:to_print = []if home:for line in PyFunceble.ASCII_PYFUNCEBLE.split(\"\"):to_print.append(PyFunceble.Fore.YELLOW + line + PyFunceble.Fore.RESET)elif PyFunceble.INTERN[\"\"][\"\"][\"\"] >= :for line in PyFunceble.ASCII_PYFUNCEBLE.split(\"\"):to_print.append(PyFunceble.Fore.GREEN + line + PyFunceble.Fore.RESET)else:for line in PyFunceble.ASCII_PYFUNCEBLE.split(\"\"):to_print.append(PyFunceble.Fore.RED + line + PyFunceble.Fore.RESET)print(\"\".join(to_print))", "docstring": "Print the colored logo based on global results.\n\n:param home: Tell us if we have to print the initial coloration.\n:type home: bool", "id": "f8244:c0:m12"} {"signature": "def _entry_management_url(self):", "body": "if (self.url_file and not self._entry_management_url_download(self.url_file )): PyFunceble.INTERN[\"\"] = self.url_file", "docstring": "Manage the loading of the url system.", "id": "f8244:c0:m2"} {"signature": "@classmethoddef reset_counters(cls):", "body": "for string in [\"\", \"\", \"\", 
\"\"]:PyFunceble.INTERN[\"\"][\"\"].update({string: })", "docstring": "Reset the counters when needed.", "id": "f8244:c0:m11"} {"signature": "@classmethoddef _get_current_version_yaml(cls):", "body": "return Dict().from_yaml(File(PyFunceble.CURRENT_DIRECTORY + \"\").read())", "docstring": "Get and return the content of version.yaml", "id": "f8246:c0:m2"} {"signature": "def _update_code_urls(self):", "body": "to_ignore = [\"\", \"\"]for root, _, files in PyFunceble.walk(PyFunceble.CURRENT_DIRECTORY+ PyFunceble.directory_separator+ \"\"+ PyFunceble.directory_separator):for file in files:if file not in to_ignore and \"\" not in root:if root.endswith(PyFunceble.directory_separator):self._update_docs(root + file)else:self._update_docs(root + PyFunceble.directory_separator + file)for root, _, files in PyFunceble.walk(PyFunceble.CURRENT_DIRECTORY+ PyFunceble.directory_separator+ \"\"+ PyFunceble.directory_separator):for file in files:if file not in to_ignore and \"\" not in root:if root.endswith(PyFunceble.directory_separator):self._update_docs(root + file)else:self._update_docs(root + PyFunceble.directory_separator + file)", "docstring": "Read the code and update all links.", "id": "f8246:c0:m1"} {"signature": "def is_time_older(self):", "body": "if (self._authorization()and self.is_in_database()and int(PyFunceble.INTERN[\"\"][PyFunceble.INTERN[\"\"]][PyFunceble.INTERN[\"\"]][\"\"])< int(PyFunceble.time())):return Truereturn False", "docstring": "Check if the current time is older than the one in the database.", "id": "f8247:c1:m5"} {"signature": "def _backup(self):", "body": "if self._authorization():Dict(PyFunceble.INTERN[\"\"]).to_json(self.whois_db_path)", "docstring": "Backup the database into its file.", "id": "f8247:c1:m3"} {"signature": "def get_expiration_date(self):", "body": "if self._authorization() and self.is_in_database() and not self.is_time_older():result = PyFunceble.INTERN[\"\"][PyFunceble.INTERN[\"\"]][PyFunceble.INTERN[\"\"]][\"\"]if result:return resultreturn None", "docstring": "Get the expiration date from the database.\n\n:return: The expiration date from the database.\n:rtype: str|None", "id": "f8247:c1:m6"} {"signature": "@classmethoddef _authorization(cls):", "body": "if (not PyFunceble.CONFIGURATION[\"\"]and PyFunceble.CONFIGURATION[\"\"]):return Truereturn False", "docstring": "Check if we are authorized to work with our database.", "id": "f8247:c1:m1"} {"signature": "def _backup(self):", "body": "if PyFunceble.CONFIGURATION[\"\"]:Dict(PyFunceble.INTERN[\"\"]).to_json(self.inactive_db_path)", "docstring": "Save the current database into the inactive-db.json file.", "id": "f8247:c0:m4"} {"signature": "@classmethoddef _convert_or_shorten_month(cls, data):", "body": "short_month = {\"\": [str(), \"\", \"\", \"\"],\"\": [str(), \"\", \"\", \"\"],\"\": [str(), \"\", \"\", \"\"],\"\": [str(), \"\", \"\", \"\"],\"\": [str(), \"\", \"\"],\"\": [str(), \"\", \"\", \"\"],\"\": [str(), \"\", \"\", \"\"],\"\": [str(), \"\", \"\", \"\"],\"\": [str(), \"\", \"\", \"\"],\"\": [str(), \"\", \"\"],\"\": [str(), \"\", \"\"],\"\": [str(), \"\", \"\"],}for month in short_month:if data in short_month[month]:return monthreturn data", "docstring": "Convert a given month into our unified format.\n\n:param data: The month to convert or shorten.\n:type data: str\n\n:return: The unified month name.\n:rtype: str", "id": "f8249:c0:m3"} {"signature": "def get(self): ", "body": "domain_validation = self.checker.is_domain_valid()ip_validation = self.checker.is_ip_valid()if \"\" in 
PyFunceble.INTERN:PyFunceble.INTERN[\"\"].update({\"\": domain_validation,\"\": ip_validation,})if (domain_validationand not ip_validationor domain_validationor PyFunceble.CONFIGURATION[\"\"]):PyFunceble.INTERN.update({\"\": HTTPCode().get(), \"\": Referer().get()})if not PyFunceble.INTERN[\"\"]:return PyFunceble.INTERN[\"\"]if PyFunceble.INTERN[\"\"] and not self.checker.is_subdomain():return self._extract()Logs().whois(self.whois_record)return Noneif (ip_validationand not domain_validationor ip_validationor PyFunceble.CONFIGURATION[\"\"]):PyFunceble.INTERN[\"\"] = HTTPCode().get()Logs().whois(self.whois_record)return NoneLogs().whois(self.whois_record)return False", "docstring": "Execute the logic behind the meaning of ExpirationDate + return the matched status.\n\n:return:\n The status of the tested domain.\n Can be one of the official status.\n:rtype: str", "id": "f8249:c0:m1"} {"signature": "@classmethoddef handle(cls, status, invalid_source=\"\"):", "body": "if status.lower() not in PyFunceble.STATUS[\"\"][\"\"]:source = \"\"if Lookup().nslookup():status, source = cls.extra_rules.handle(PyFunceble.STATUS[\"\"][\"\"], source)Generate(status, source).status_file()return status, sourcestatus, source = cls.extra_rules.handle(PyFunceble.STATUS[\"\"][\"\"], source)Generate(status, source).status_file()return status, sourcestatus, source = cls.extra_rules.handle(PyFunceble.STATUS[\"\"][\"\"], invalid_source)Generate(status, source).status_file()return status, source", "docstring": "Handle the lack of WHOIS and expiration date. :smile_cat:\n\n:param matched_status: The status that we have to handle.\n:type status: str\n\n:param invalid_source:\n The source to set when we handle INVALID element.\n:type invalid_source: str\n\n:return:\n The strus of the domain after generating the files desired\n by the user.\n:rtype: str", "id": "f8251:c0:m2"} {"signature": "def get_account_hash(self):", "body": "return self.storage_path.rsplit('', )[]", "docstring": "See :py:func:`swiftly.client.client.Client.get_account_hash`", "id": "f8256:c0:m3"} {"signature": "@contextlib.contextmanagerdef with_client(self):", "body": "client = self.get_client()yield clientself.put_client(client)", "docstring": "A context manager that obtains a client for use, whether an\nexisting unused client or a brand new one if none are\navailable.", "id": "f8257:c0:m3"} {"signature": "def reset(self):", "body": "pass", "docstring": "Resets the client, closing any connections and discarding any\nstate. This can be useful if some exceptional condition\noccurred and the request/response state can no longer be\ncertain.", "id": "f8258:c0:m1"} {"signature": "def post_account(self, headers=None, query=None, cdn=False, body=None):", "body": "return self.request('', '', body or '', headers, query=query, cdn=cdn)", "docstring": "POSTs the account and returns the results. This is usually\ndone to set X-Account-Meta-xxx headers. Note that any existing\nX-Account-Meta-xxx headers will remain untouched. 
To remove an\nX-Account-Meta-xxx header, send the header with an empty\nstring as its value.\n\n:param headers: Additional headers to send with the request.\n:param query: Set to a dict of query values to send on the\n query string of the request.\n:param cdn: If set True, the CDN management interface will be\n used.\n:param body: No known Swift POSTs take a body; but the option\n is there for the future.\n:returns: A tuple of (status, reason, headers, contents).\n\n :status: is an int for the HTTP status code.\n :reason: is the str for the HTTP status (ex: \"Ok\").\n :headers: is a dict with all lowercase keys of the HTTP\n headers; if a header has multiple values, it will be a\n list.\n :contents: is the str for the HTTP body.", "id": "f8258:c0:m10"} {"signature": "def put_container(self, container, headers=None, query=None, cdn=False,body=None):", "body": "path = self._container_path(container)return self.request('', path, body or '', headers, query=query, cdn=cdn)", "docstring": "PUTs the container and returns the results. This is usually\ndone to create new containers and can also be used to set\nX-Container-Meta-xxx headers. Note that if the container\nalready exists, any existing X-Container-Meta-xxx headers will\nremain untouched. To remove an X-Container-Meta-xxx header,\nsend the header with an empty string as its value.\n\n:param container: The name of the container.\n:param headers: Additional headers to send with the request.\n:param query: Set to a dict of query values to send on the\n query string of the request.\n:param cdn: If set True, the CDN management interface will be\n used.\n:param body: Some container PUT requests, like the\n extract-archive bulk upload request, take a body.\n:returns: A tuple of (status, reason, headers, contents).\n\n :status: is an int for the HTTP status code.\n :reason: is the str for the HTTP status (ex: \"Ok\").\n :headers: is a dict with all lowercase keys of the HTTP\n headers; if a header has multiple values, it will be a\n list.\n :contents: is the str for the HTTP body.", "id": "f8258:c0:m14"} {"signature": "def get_account(self, headers=None, prefix=None, delimiter=None,marker=None, end_marker=None, limit=None, query=None,cdn=False, decode_json=True):", "body": "query = dict(query or {})query[''] = ''if prefix:query[''] = prefixif delimiter:query[''] = delimiterif marker:query[''] = markerif end_marker:query[''] = end_markerif limit:query[''] = limitreturn self.request('', '', '', headers, decode_json=decode_json, query=query,cdn=cdn)", "docstring": "GETs the account and returns the results. This is done to list\nthe containers for the account. Some useful headers are also\nreturned:\n\n=========================== =================================\nx-account-bytes-used Object storage used for the\n account, in bytes.\nx-account-container-count The number of containers in the\n account.\nx-account-object-count The number of objects in the\n account.\n=========================== =================================\n\nAlso, any user headers beginning with x-account-meta- are\nreturned.\n\nThese values can be delayed depending the Swift cluster.\n\n:param headers: Additional headers to send with the request.\n:param prefix: The prefix container names must match to be\n listed.\n:param delimiter: The delimiter for the listing. Delimiters\n indicate how far to progress through container names\n before \"rolling them up\". 
For instance, a delimiter='.'\n query on an account with the containers::\n\n one.one\n one.two\n two\n three.one\n\n would return the JSON value of::\n\n [{'subdir': 'one.'},\n {'count': 0, 'bytes': 0, 'name': 'two'},\n {'subdir': 'three.'}]\n\n Using this with prefix can allow you to traverse a psuedo\n hierarchy.\n:param marker: Only container names after this marker will be\n returned. Swift returns a limited number of containers\n per request (often 10,000). To get the next batch of\n names, you issue another query with the marker set to the\n last name you received. You can continue to issue\n requests until you receive no more names.\n:param end_marker: Only container names before this marker will be\n returned.\n:param limit: Limits the size of the list returned per\n request. The default and maximum depends on the Swift\n cluster (usually 10,000).\n:param query: Set to a dict of query values to send on the\n query string of the request.\n:param cdn: If set True, the CDN management interface will be\n used.\n:param decode_json: If set False, the usual decoding of the\n JSON response will be skipped and the raw contents will\n be returned instead.\n:returns: A tuple of (status, reason, headers, contents).\n\n :status: is an int for the HTTP status code.\n :reason: is the str for the HTTP status (ex: \"Ok\").\n :headers: is a dict with all lowercase keys of the HTTP\n headers; if a header has multiple values, it will be a\n list.\n :contents: is the decoded JSON response or the raw str\n for the HTTP body.", "id": "f8258:c0:m8"} {"signature": "def get_object(self, container, obj, headers=None, stream=True, query=None,cdn=False):", "body": "path = self._object_path(container, obj)return self.request('', path, '', headers, query=query, stream=stream, cdn=cdn)", "docstring": "GETs the object and returns the results.\n\n:param container: The name of the container.\n:param obj: The name of the object.\n:param headers: Additional headers to send with the request.\n:param stream: Indicates whether to stream the contents or\n preread them fully and return them as a str. Default:\n True to stream the contents. When streaming, contents\n will have the standard file-like-object read function,\n which accepts an optional size parameter to limit how\n much data is read per call. When streaming is on, be\n certain to fully read the contents before issuing another\n request.\n:param query: Set to a dict of query values to send on the\n query string of the request.\n:param cdn: If set True, the CDN management interface will be\n used.\n:returns: A tuple of (status, reason, headers, contents).\n\n :status: is an int for the HTTP status code.\n :reason: is the str for the HTTP status (ex: \"Ok\").\n :headers: is a dict with all lowercase keys of the HTTP\n headers; if a header has multiple values, it will be a\n list.\n :contents: if *stream* was True, *contents* is a\n file-like-object of the contents of the HTTP body. 
If\n *stream* was False, *contents* is just a simple str of\n the HTTP body.", "id": "f8258:c0:m18"} {"signature": "def delete_account(self, headers=None,yes_i_mean_delete_the_account=False, query=None,cdn=False, body=None):", "body": "if not yes_i_mean_delete_the_account and (not body or not query or '' not in query):return (, '', {},'')return self.request('', '', body or '', headers, query=query, cdn=cdn)", "docstring": "Sends a DELETE request to the account and returns the results.\n\nWith ``query['bulk-delete'] = ''`` this might mean a bulk\ndelete request where the body of the request is new-line\nseparated, url-encoded list of names to delete. Be careful\nwith this! One wrong move and you might mark your account for\ndeletion of you have the access to do so!\n\nFor a plain DELETE to the account, on clusters that support\nit and, assuming you have permissions to do so, the account\nwill be marked as deleted and immediately begin removing the\nobjects from the cluster in the backgound.\n\nTHERE IS NO GOING BACK!\n\n:param headers: Additional headers to send with the request.\n:param yes_i_mean_delete_the_account: Set to True to verify\n you really mean to delete the entire account. This is\n required unless ``body and 'bulk-delete' in query``.\n:param query: Set to a dict of query values to send on the\n query string of the request.\n:param cdn: If set True, the CDN management interface will be\n used.\n:param body: Some account DELETE requests, like the bulk\n delete request, take a body.\n:returns: A tuple of (status, reason, headers, contents).\n\n :status: is an int for the HTTP status code.\n :reason: is the str for the HTTP status (ex: \"Ok\").\n :headers: is a dict with all lowercase keys of the HTTP\n headers; if a header has multiple values, it will be\n a list.\n :contents: is the str for the HTTP body.", "id": "f8258:c0:m11"} {"signature": "def quote(value, safe=''):", "body": "if isinstance(value, six.text_type):value = value.encode('')elif not isinstance(value, six.string_types):value = str(value)return parse.quote(value, safe)", "docstring": "Much like parse.quote in that it returns a URL encoded string\nfor the given value, protecting the safe characters; but this\nversion also ensures the value is UTF-8 encoded.", "id": "f8262:m2"} {"signature": "def write_headers(self, fp, headers, mute=None):", "body": "if headers:if not mute:mute = []fmt = '' % (max(len(k) for k in headers) + )for key in sorted(headers):if key in mute:continuefp.write(fmt % (key.title() + '', headers[key]))fp.flush()", "docstring": "Convenience function to output headers in a formatted fashion\nto a file-like fp, optionally muting any headers in the mute\nlist.", "id": "f8270:c0:m3"} {"signature": "def copy(self):", "body": "context = CLIContext()for item in dir(self):if item[] != '' and item not in ('', ''):setattr(context, item, getattr(self, item))return context", "docstring": "Returns a new CLIContext instance that is a shallow copy of\nthe original, much like dict's copy method.", "id": "f8270:c0:m2"} {"signature": "def cli_get_account_listing(context):", "body": "limit = context.query.get('')delimiter = context.query.get('')prefix = context.query.get('')marker = context.query.get('')end_marker = context.query.get('')if context.raw:with context.client_manager.with_client() as client:status, reason, headers, contents = client.get_account(decode_json=False, headers=context.headers, limit=limit,marker=marker, end_marker=end_marker, query=context.query,cdn=context.cdn)if hasattr(contents, 
''):contents = contents.read()if status // != :if status == and context.ignore_404:returnraise ReturnCode('' % (status, reason))with context.io_manager.with_stdout() as fp:if context.output_headers:context.write_headers(fp, headers, context.muted_account_headers)fp.write(contents)fp.flush()returnwith context.client_manager.with_client() as client:status, reason, headers, contents = client.get_account(headers=context.headers, limit=limit, marker=marker,end_marker=end_marker, query=context.query, cdn=context.cdn)if status // != :if status == and context.ignore_404:returnif hasattr(contents, ''):contents.read()raise ReturnCode('' % (status, reason))if context.output_headers and not context.all_objects:with context.io_manager.with_stdout() as fp:context.write_headers(fp, headers, context.muted_account_headers)while contents:if context.all_objects:new_context = context.copy()new_context.query = dict(new_context.query)for remove in ('', '', '', '', ''):if remove in new_context.query:del new_context.query[remove]for item in contents:if '' in item:new_path = item[''].encode('')cli_get_container_listing(new_context, new_path)else:with context.io_manager.with_stdout() as fp:for item in contents:if context.full:fp.write('' % (item.get('', ''),item.get('', '')))fp.write(item.get('', item.get('')))fp.write('')fp.flush()if limit:breakmarker = contents[-].get('', contents[-].get('', ''))with context.client_manager.with_client() as client:status, reason, headers, contents = client.get_account(headers=context.headers, limit=limit, delimiter=delimiter,prefix=prefix, end_marker=end_marker, marker=marker,query=context.query, cdn=context.cdn)if status // != :if status == and context.ignore_404:returnif hasattr(contents, ''):contents.read()raise ReturnCode('' % (status, reason))", "docstring": "Performs a GET on the account as a listing request.\n\nSee :py:mod:`swiftly.cli.get` for context usage information.\n\nSee :py:class:`CLIGet` for more information.", "id": "f8273:m0"} {"signature": "def cli_get(context, path=None):", "body": "path = path.lstrip('') if path else Noneif not path:return cli_get_account_listing(context)elif '' not in path.rstrip(''):return cli_get_container_listing(context, path)status, reason, headers, contents = , '', {}, ''with context.client_manager.with_client() as client:status, reason, headers, contents = client.get_object(*path.split('', ), headers=context.headers, query=context.query,cdn=context.cdn)if status // != :if status == and context.ignore_404:returnif hasattr(contents, ''):contents.read()raise ReturnCode('' % (path, status, reason))if context.decrypt:crypt_type = contents.read()if crypt_type == AES256CBC:contents = FileLikeIter(aes_decrypt(context.decrypt, contents,chunk_size=getattr(client, '', )))else:raise ReturnCode('''' % (path, crypt_type))def disk_closed_callback(disk_path):if context.remove_empty_files and not os.path.getsize(disk_path):os.unlink(disk_path)if context.io_manager.stdout_root:dirname = os.path.dirname(disk_path)while dirname and dirname.startswith(context.io_manager.stdout_root):try:os.rmdir(dirname)except OSError:passdirname = os.path.dirname(dirname)returnif (headers.get('') in['', ''] andheaders.get('') == ''):os.unlink(disk_path)os.makedirs(disk_path)mtime = if '' in headers:mtime = float(headers[''])elif '' in headers:mtime = time.mktime(time.strptime(headers[''], ''))if mtime:os.utime(disk_path, (mtime, mtime))out_path = pathif context.suppress_container_name:out_path = out_path.split('', )[]out_path = 
context.io_manager.client_path_to_os_path(out_path)with context.io_manager.with_stdout(out_path, disk_closed_callback=disk_closed_callback) as fp:if context.output_headers:context.write_headers(fp, headers, context.muted_object_headers)fp.write('')chunk = contents.read()while chunk:fp.write(chunk)chunk = contents.read()fp.flush()", "docstring": "Performs a GET on the item (account, container, or object).\n\nSee :py:mod:`swiftly.cli.get` for context usage information.\n\nSee :py:class:`CLIGet` for more information.", "id": "f8273:m2"} {"signature": "def cli_get_container_listing(context, path=None):", "body": "path = path.strip('') if path else Noneif not path or '' in path:raise ReturnCode('' %path)context.suppress_container_name = Truelimit = context.query.get('')delimiter = context.query.get('')prefix = context.query.get('')marker = context.query.get('')end_marker = context.query.get('')if context.raw:with context.client_manager.with_client() as client:status, reason, headers, contents = client.get_container(path, decode_json=False, headers=context.headers, limit=limit,marker=marker, end_marker=end_marker, query=context.query,cdn=context.cdn)if hasattr(contents, ''):contents = contents.read()if status // != :if status == and context.ignore_404:returnraise ReturnCode('' % (path, status, reason))with context.io_manager.with_stdout() as fp:if context.output_headers:context.write_headers(fp, headers, context.muted_container_headers)fp.write(contents)fp.flush()returnwith context.client_manager.with_client() as client:status, reason, headers, contents = client.get_container(path, headers=context.headers, limit=limit, delimiter=delimiter,prefix=prefix, marker=marker, end_marker=end_marker,query=context.query, cdn=context.cdn)if status // != :if status == and context.ignore_404:returnif hasattr(contents, ''):contents.read()raise ReturnCode('' % (path, status, reason))if context.output_headers and not context.all_objects:with context.io_manager.with_stdout() as fp:context.write_headers(fp, headers, context.muted_container_headers)conc = Concurrency(context.concurrency)while contents:if context.all_objects:new_context = context.copy()new_context.query = dict(new_context.query)for remove in ('', '', '', '', ''):if remove in new_context.query:del new_context.query[remove]for item in contents:if '' in item:for (exc_type, exc_value, exc_tb, result) insix.itervalues(conc.get_results()):if exc_value:conc.join()raise exc_valuenew_path = path + '' + item[''].encode('')conc.spawn(new_path, cli_get, new_context, new_path)else:with context.io_manager.with_stdout() as fp:for item in contents:if context.full:fp.write('' % (item.get('', ''),item.get('', '')[:].replace('', ''),item.get('', ''),item.get('', '')))fp.write(item.get('', item.get('')))fp.write('')fp.flush()if limit:breakmarker = contents[-].get('', contents[-].get('', ''))with context.client_manager.with_client() as client:status, reason, headers, contents = client.get_container(path, headers=context.headers, limit=limit,delimiter=delimiter, prefix=prefix, end_marker=end_marker,marker=marker, query=context.query, cdn=context.cdn)if status // != :if status == and context.ignore_404:returnif hasattr(contents, ''):contents.read()raise ReturnCode('' % (path, status, reason))conc.join()for (exc_type, exc_value, exc_tb, result) insix.itervalues(conc.get_results()):if exc_value:raise exc_value", "docstring": "Performs a GET on the container as a listing request.\n\nSee :py:mod:`swiftly.cli.get` for context usage information.\n\nSee :py:class:`CLIGet` for 
more information.", "id": "f8273:m1"} {"signature": "def cli_help(context, command_name, general_parser, command_parsers):", "body": "if command_name == '':command_name = ''with context.io_manager.with_stdout() as stdout:if not command_name:general_parser.print_help(stdout)elif command_name in command_parsers:command_parsers[command_name].option_parser.print_help(stdout)else:raise ReturnCode('' % command_name)", "docstring": "Outputs help information.\n\nSee :py:mod:`swiftly.cli.help` for context usage information.\n\nSee :py:class:`CLIHelp` for more information.\n\n:param context: The :py:class:`swiftly.cli.context.CLIContext` to\n use.\n:param command_name: The command_name to output help information\n for, or set to None or an empty string to output the general\n help information.\n:param general_parser: The\n :py:class:`swiftly.cli.optionparser.OptionParser` for general\n usage.\n:param command_parsers: A dict of (name, :py:class:`CLICommand`)\n for specific command usage.", "id": "f8276:m0"} {"signature": "def _stderr_filed(func):", "body": "def wrapper(self, msg, file=None):if file:return func(self, msg, file=file)elif self.io_manager:with self.io_manager.with_stderr() as stderr:return func(self, msg, file=stderr)else:return func(self, msg, file=sys.stderr)wrapper.__doc__ = func.__doc__return wrapper", "docstring": "Instance method decorator to convert an optional file keyword\nargument into an actual value, whether it be a passed value, a\nvalue obtained from an io_manager, or sys.stderr.", "id": "f8281:m1"} {"signature": "def get_stdout(self, os_path=None, skip_sub_command=False):", "body": "sub_command = None if skip_sub_command else self.stdout_sub_commandout, path = self._get_out_and_path(self.stdout, self.stdout_root, sub_command, os_path)if hasattr(out, ''):return out.stdinreturn out", "docstring": "Returns a stdout-suitable file-like object based on the\noptional os_path and optionally skipping any configured\nsub-command.", "id": "f8283:c0:m7"} {"signature": "def get_stdin(self, os_path=None, skip_sub_command=False):", "body": "sub_command = None if skip_sub_command else self.stdin_sub_commandinn, path = self._get_in_and_path(self.stdin, self.stdin_root, sub_command, os_path)if hasattr(inn, ''):return inn.stdoutreturn inn", "docstring": "Returns a stdin-suitable file-like object based on the\noptional os_path and optionally skipping any configured\nsub-command.", "id": "f8283:c0:m6"} {"signature": "def join(self):", "body": "if self._pool:self._pool.waitall()", "docstring": "Blocks until all currently pending functions have finished.", "id": "f8286:c0:m4"} {"signature": "def get_location_observation(lat, lng, token):", "body": "req = requests.get(API_ENDPOINT_GEO % (lat, lng),params={'': token})if req.status_code == and req.json()[\"\"] == \"\":return parse_observation_response(req.json()[\"\"])return {}", "docstring": "Lookup observations by geo coordinates.", "id": "f8291:m1"} {"signature": "def create_joining_subplan(pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output):", "body": "check.inst_param(pipeline_def, '', PipelineDefinition)check.inst_param(solid, '', Solid)check.str_param(join_step_key, '')check.list_param(parallel_steps, '', of_type=ExecutionStep)check.str_param(parallel_step_output, '')for parallel_step in parallel_steps:check.invariant(parallel_step.has_step_output(parallel_step_output))join_step = create_join_step(pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output)output_name = join_step.step_outputs[].namereturn 
ExecutionValueSubplan(parallel_steps + [join_step], StepOutputHandle.from_step(join_step, output_name))", "docstring": "This captures a common pattern of fanning out a single value to N steps,\nwhere each step has similar structure. The strict requirement here is that each step\nmust provide an output named the parameters parallel_step_output.\n\nThis takes those steps and then uses a join node to coalesce them so that downstream\nsteps can depend on a single output.\n\nCurrently the join step just does a passthrough with no computation. It remains\nto be seen if there should be any work or verification done in this step, especially\nin multi-process environments that require persistence between steps.", "id": "f8357:m2"} {"signature": "def as_dagster_type(existing_type,name=None,description=None,input_schema=None,output_schema=None,serialization_strategy=None,storage_plugins=None,):", "body": "check.type_param(existing_type, '')check.opt_str_param(name, '')check.opt_str_param(description, '')check.opt_inst_param(input_schema, '', InputSchema)check.opt_inst_param(output_schema, '', OutputSchema)check.opt_inst_param(serialization_strategy, '', SerializationStrategy)storage_plugins = check.opt_dict_param(storage_plugins, '')if serialization_strategy is None:serialization_strategy = PickleSerializationStrategy()name = existing_type.__name__ if name is None else namereturn _decorate_as_dagster_type(existing_type,key=name,name=name,description=description,input_schema=input_schema,output_schema=output_schema,serialization_strategy=serialization_strategy,storage_plugins=storage_plugins,)", "docstring": "Takes a python cls and creates a type for it in the Dagster domain.\n\nArgs:\n existing_type (cls)\n The python type you want to project in to the Dagster type system.\n name (Optional[str]):\n description (Optiona[str]):\n input_schema (Optional[InputSchema]):\n An instance of a class that inherits from :py:class:`InputSchema` that\n can map config data to a value of this type.\n\n output_schema (Optiona[OutputSchema]):\n An instance of a class that inherits from :py:class:`OutputSchema` that\n can map config data to persisting values of this type.\n\n serialization_strategy (Optional[SerializationStrategy]):\n The default behavior for how to serialize this value for\n persisting between execution steps.\n\n storage_plugins (Optional[Dict[RunStorageMode, TypeStoragePlugin]]):\n Storage type specific overrides for the serialization strategy.\n This allows for storage specific optimzations such as effecient\n distributed storage on S3.", "id": "f8366:m6"} {"signature": "def output_schema(config_cls):", "body": "config_type = resolve_config_cls_arg(config_cls)return lambda func: _create_output_schema(config_type, func)", "docstring": "A decorator for a annotating a function that can take a ``config_value``\nand an instance of a custom type and materialize it.\n\nArgs:\n config_cls (Any):", "id": "f8368:m6"} {"signature": "def materialize_runtime_value(self, _context, _config_value, _runtime_value):", "body": "check.not_implemented('')", "docstring": "How to materialize a runtime value given configuration.", "id": "f8368:c1:m1"} {"signature": "def input_schema(config_cls):", "body": "config_type = resolve_config_cls_arg(config_cls)return lambda func: _create_input_schema(config_type, func)", "docstring": "A decorator for annotating a function that can turn a ``config_value`` in to\nan instance of a custom type.\n\nArgs:\n config_cls (Any):", "id": "f8368:m3"} {"signature": "def List(inner_type):", 
"body": "return WrappingListType(inner_type)", "docstring": "Validates at runtime that the value is ``List[inner_type]``.\n\nArgs:\n inner_type (DagsterType)", "id": "f8371:m0"} {"signature": "def NamedSelector(name, fields, description=None, type_attributes=DEFAULT_TYPE_ATTRIBUTES):", "body": "check.str_param(name, '')check_user_facing_fields_dict(fields, ''.format(name))class _NamedSelector(_ConfigSelector):def __init__(self):super(_NamedSelector, self).__init__(key=name,name=name,fields=fields,description=description,type_attributes=type_attributes,)return _NamedSelector", "docstring": "A :py:class`Selector` with a name, allowing it to be referenced by that name.\n\nArgs:\n name (str):\n fields (Dict[str, Field])", "id": "f8375:m10"} {"signature": "def Dict(fields):", "body": "check_user_facing_fields_dict(fields, '')class _Dict(_ConfigComposite):def __init__(self):key = '' + str(DictCounter.get_next_count())super(_Dict, self).__init__(name=None,key=key,fields=fields,description='',type_attributes=ConfigTypeAttributes(is_builtin=True),)return _Dict", "docstring": "Schema for configuration data with string keys and typed values via :py:class:`Field` .\n\nArgs:\n fields (Dict[str, Field])", "id": "f8375:m7"} {"signature": "def construct_json_event_logger(json_path):", "body": "check.str_param(json_path, '')return construct_single_handler_logger(\"\",DEBUG,JsonEventLoggerHandler(json_path,lambda record: construct_event_record(StructuredLoggerMessage(name=record.name,message=record.msg,level=record.levelno,meta=record.dagster_meta,record=record,)),),)", "docstring": "Record a stream of event records to json", "id": "f8385:m3"} {"signature": "@staticmethoddef passthrough_context_definition(context_params):", "body": "check.inst_param(context_params, '', ExecutionContext)context_definition = PipelineContextDefinition(context_fn=lambda *_args: context_params)return {DEFAULT_CONTEXT_NAME: context_definition}", "docstring": "Create a context definition from a pre-existing context. This can be useful\n in testing contexts where you may want to create a context manually and then\n pass it into a one-off PipelineDefinition\n\n Args:\n context (ExecutionContext): The context that will provided to the pipeline.\n Returns:\n PipelineContextDefinition: The passthrough context definition.", "id": "f8392:c0:m0"} {"signature": "@propertydef display_name(self):", "body": "return self.name if self.name else ''", "docstring": "Name suitable for exception messages, logging etc. If pipeline\n is unnamed the method with return \"<>\".\n\n Returns:\n str: Display name of pipeline", "id": "f8395:c0:m1"} {"signature": "def get_all_pipelines(self):", "body": "pipelines = list(map(self.get_pipeline, self.pipeline_dict.keys()))self._construct_solid_defs(pipelines)return pipelines", "docstring": "Return all pipelines as a list\n\n Returns:\n List[PipelineDefinition]:", "id": "f8397:c0:m3"} {"signature": "def lambda_solid(name=None, inputs=None, output=None, description=None):", "body": "output = output or OutputDefinition()if callable(name):check.invariant(inputs is None)check.invariant(description is None)return _LambdaSolid(output=output)(name)return _LambdaSolid(name=name, inputs=inputs, output=output, description=description)", "docstring": "(decorator) Create a simple solid.\n\n This shortcut allows the creation of simple solids that do not require\n configuration and whose implementations do not require a context.\n\n Lambda solids take inputs and produce a single output. 
The body of the function\n should return a single value.\n\n Args:\n name (str): Name of solid.\n inputs (list[InputDefinition]): List of inputs.\n output (OutputDefinition): The output of the solid. Defaults to ``OutputDefinition()``.\n description (str): Solid description.\n\n Examples:\n\n .. code-block:: python\n\n @lambda_solid\n def hello_world():\n return 'hello'\n\n @lambda_solid(inputs=[InputDefinition(name='foo')])\n def hello_world(foo):\n return foo", "id": "f8402:m0"} {"signature": "def SystemNamedDict(name, fields, description=None):", "body": "return NamedDict(name, fields, description, ConfigTypeAttributes(is_system_config=True))", "docstring": "A SystemNamedDict object is simply a NamedDict intended for internal (dagster) use.", "id": "f8404:m0"} {"signature": "@contextmanagerdef user_code_context_manager(user_fn, error_cls, msg):", "body": "check.callable_param(user_fn, '')check.subclass_param(error_cls, '', DagsterUserCodeExecutionError)with user_code_error_boundary(error_cls, msg):thing_or_gen = user_fn()gen = _ensure_gen(thing_or_gen)try:thing = next(gen)except StopIteration:check.failed('')yield thingstopped = Falsetry:next(gen)except StopIteration:stopped = Truecheck.invariant(stopped, '')", "docstring": "Wraps the output of a user provided function that may yield or return a value and\n returns a generator that asserts it only yields a single value.", "id": "f8406:m4"} {"signature": "def execute_pipeline_iterator(pipeline, environment_dict=None, run_config=None):", "body": "check.inst_param(pipeline, '', PipelineDefinition)environment_dict = check.opt_dict_param(environment_dict, '')run_config = check_run_config_param(run_config)environment_config = create_environment_config(pipeline, environment_dict)intermediates_manager = construct_intermediates_manager(run_config, environment_config, pipeline)with _pipeline_execution_context_manager(pipeline, environment_config, run_config, intermediates_manager) as pipeline_context:return _execute_pipeline_iterator(pipeline_context)", "docstring": "Returns iterator that yields :py:class:`SolidExecutionResult` for each\n solid executed in the pipeline.\n\n This is intended to allow the caller to do things between each executed\n node. 
For the 'synchronous' API, see :py:func:`execute_pipeline`.\n\n Parameters:\n pipeline (PipelineDefinition): Pipeline to run\n environment_dict (dict): The enviroment configuration that parameterizes this run\n run_config (RunConfig): Configuration for how this pipeline will be executed\n\n Returns:\n Iterator[DagsterEvent]", "id": "f8406:m15"} {"signature": "@propertydef success(self):", "body": "any_success = Falsefor step_event in itertools.chain(self.input_expectations, self.output_expectations, self.transforms):if step_event.event_type == DagsterEventType.STEP_FAILURE:return Falseif step_event.event_type == DagsterEventType.STEP_SUCCESS:any_success = Truereturn any_success", "docstring": "Whether the solid execution was successful", "id": "f8406:c1:m5"} {"signature": "@propertydef skipped(self):", "body": "return all([step_event.event_type == DagsterEventType.STEP_SKIPPEDfor step_event in itertools.chain(self.input_expectations, self.output_expectations, self.transforms)])", "docstring": "Whether the solid execution was skipped", "id": "f8406:c1:m6"} {"signature": "def block(self, text, prefix=''):", "body": "wrapper = TextWrapper(width=self.line_length - len(self.current_indent_str),initial_indent=prefix,subsequent_indent=prefix,break_long_words=False,break_on_hyphens=False,)for line in wrapper.wrap(text):self.line(line)", "docstring": "Automagically wrap a block of text.", "id": "f8411:c0:m3"} {"signature": "def dict_param(obj, param_name, key_type=None, value_type=None):", "body": "if not isinstance(obj, dict):raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name))if not (key_type or value_type):return objreturn _check_key_value_types(obj, key_type, value_type)", "docstring": "Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise\n returns obj.", "id": "f8427:m36"} {"signature": "def opt_dict_param(obj, param_name, key_type=None, value_type=None, value_class=None):", "body": "if obj is not None and not isinstance(obj, dict):raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name))if not obj:return {}if value_class:return _check_key_value_types(obj, key_type, value_type=value_class, value_check=issubclass)return _check_key_value_types(obj, key_type, value_type)", "docstring": "Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty\n dictionary.", "id": "f8427:m37"} {"signature": "def define_spark_config():", "body": "master_url = Field(String,description='',is_optional=False,)deploy_mode = Field(SparkDeployMode,description='''''',is_optional=True,)application_jar = Field(Path,description='''''',is_optional=False,)application_arguments = Field(String,description='',is_optional=True,)spark_home = Field(String,description='',is_optional=True,)spark_outputs = Field(List(String), description='')return Field(Dict(fields={'': master_url,'': deploy_mode,'': application_jar,'': spark_config(),'': spark_home,'': application_arguments,'': spark_outputs,}))", "docstring": "Spark configuration.\n\n See the Spark documentation for reference:\n https://spark.apache.org/docs/latest/submitting-applications.html", "id": "f8532:m0"} {"signature": "def define_bigquery_query_config():", "body": "sf = _define_shared_fields()allow_large_results = Field(Bool,description='''''',is_optional=True,)default_dataset = Field(Dataset,description='''''',is_optional=True,)destination = Field(Table,description='''''',is_optional=True,)dry_run = 
Field(Bool,description='''''',is_optional=True,)flatten_results = Field(Bool,description='''''',is_optional=True,)maximum_billing_tier = Field(Int,description='''''',is_optional=True,)maximum_bytes_billed = Field(Int,description='''''',is_optional=True,)priority = Field(BQPriority,description='''''',is_optional=True,)query_parameters = Field(List(String),description='''''',is_optional=True,)use_legacy_sql = Field(Bool,description='''''',is_optional=True,)use_query_cache = Field(Bool,description='''''',is_optional=True,)return Field(Dict(fields={'': Field(Dict(fields={'': allow_large_results,'': sf[''],'': sf[''],'': default_dataset,'': destination,'': sf[''],'': dry_run,'': flatten_results,'': maximum_billing_tier,'': maximum_bytes_billed,'': priority,'': query_parameters,'': sf[''],'': sf[''],'': use_legacy_sql,'': use_query_cache,'': sf[''],}))}),description='',)", "docstring": "See:\n https://googleapis.github.io/google-cloud-python/latest/bigquery/generated/google.cloud.bigquery.job.QueryJobConfig.html", "id": "f8539:m2"} {"signature": "def _is_valid_table(config_value):", "body": "return re.match(r''+ RE_PROJECT + r'' + RE_DS_TABLE + r'' + RE_DS_TABLE + r'' + RE_DS_TABLE + r'' + RE_DS_TABLE + r'',config_value,)", "docstring": "Tables must be of form \"project.dataset.table\" or \"dataset.table\"", "id": "f8541:m1"} {"signature": "def format_config_for_graphql(config):", "body": "def _format_config_subdict(config, current_indent=):check.dict_param(config, '', key_type=str)printer = IndentingStringIoPrinter(indent_level=, current_indent=current_indent)printer.line('')n_elements = len(config)for i, key in enumerate(sorted(config, key=lambda x: x[])):value = config[key]with printer.with_indent():formatted_value = (_format_config_item(value, current_indent=printer.current_indent).lstrip('').rstrip(''))printer.line(''.format(key=key,formatted_value=formatted_value,comma='' if i != n_elements - else '',))printer.line('')return printer.read()def _format_config_sublist(config, current_indent=):printer = IndentingStringIoPrinter(indent_level=, current_indent=current_indent)printer.line('')n_elements = len(config)for i, value in enumerate(config):with printer.with_indent():formatted_value = (_format_config_item(value, current_indent=printer.current_indent).lstrip('').rstrip(''))printer.line(''.format(formatted_value=formatted_value, comma='' if i != n_elements - else ''))printer.line('')return printer.read()def _format_config_item(config, current_indent=):printer = IndentingStringIoPrinter(indent_level=, current_indent=current_indent)if isinstance(config, dict):return _format_config_subdict(config, printer.current_indent)elif isinstance(config, list):return _format_config_sublist(config, printer.current_indent)elif isinstance(config, bool):return repr(config).lower()else:return repr(config).replace('', '')check.dict_param(config, '', key_type=str)if not isinstance(config, dict):check.failed(''.format(item=repr(config)))return _format_config_subdict(config)", "docstring": "This recursive descent thing formats a config dict for GraphQL.", "id": "f8590:m0"} {"signature": "@cli.command()@click.argument('')def release(version):", "body": "check_new_version(version)set_new_version(version)commit_new_version(version)set_git_tag(version)", "docstring": "Tags all submodules for a new release.\n\n Ensures that git tags, as well as the version.py files in each submodule, agree and that the\n new version is strictly greater than the current version. 
Will fail if the new version\n is not an increment (following PEP 440). Creates a new git tag and commit.", "id": "f8612:m28"} {"signature": "def which_(exe):", "body": "return spawn.find_executable(exe)", "docstring": "Uses distutils to look for an executable, mimicking unix which", "id": "f8612:m1"} {"signature": "def read_config(path):", "body": "config = configparser.ConfigParser()config.read(path)return config", "docstring": "Make a config parser for the given config file.\n\n Return a :class:`configparser.ConfigParser` instance with the given file\n loaded.\n\n :param path:\n Path to the config file to read.", "id": "f8613:m0"} {"signature": "def get_repository_config(self, repository):", "body": "servers = self._read_index_servers()repo_config = self._find_repo_config(servers, repository)return repo_config", "docstring": "Get config dictionary for the given repository.\n\n If the repository section is not found in the config file,\n return ``None``. If the file is invalid, raise\n :exc:`configparser.Error`.\n\n Otherwise return a dictionary with:\n\n * ``'repository'`` -- the repository URL\n * ``'username'`` -- username for authentication\n * ``'password'`` -- password for authentication\n\n :param repository:\n Name or URL of the repository to find in the ``.pypirc`` file.\n The repository section must be defined in the config file.", "id": "f8613:c1:m2"} {"signature": "def mkdir_p(newdir, mode=):", "body": "try:os.makedirs(newdir, mode)except OSError as err:if err.errno != errno.EEXIST or not os.path.isdir(newdir):raise", "docstring": "The missing mkdir -p functionality in os.", "id": "f8619:m0"} {"signature": "@solid(name='',config_field=Field(Dict(fields={'': Field(String, description=''),'': Field(String, description=''),'': Field(Any, description='',is_optional=True,),})),inputs=[InputDefinition('', Bytes, description='')],description='',outputs=[OutputDefinition(String, description='', name=''),OutputDefinition(String, description='', name=''),],)def upload_to_s3(context, file_obj):", "body": "bucket = context.solid_config['']key = context.solid_config['']context.resources.s3.put_object(Bucket=bucket, Body=file_obj.read(), Key=key, **(context.solid_config.get('') or {}))yield Result(bucket, '')yield Result(key, '')", "docstring": "Upload a file to s3.\n\n Args:\n info (ExpectationExecutionInfo): Must expose a boto3 S3 client as its `s3` resource.\n\n Returns:\n (str, str):\n The bucket and key to which the file was uploaded.", "id": "f8620:m4"} {"signature": "def nonce_solid(name, n_inputs, n_outputs):", "body": "@solid(name=name,inputs=[InputDefinition(name=''.format(i)) for i in range(n_inputs)],outputs=[OutputDefinition(name=''.format(i))for i in range(n_outputs)],)def solid_fn(context, **_kwargs):for i in range():time.sleep()if i % == :context.log.error(''.format(i=i, name=name))elif i % == :context.log.warning(''.format(i=i, name=name))elif i % == :context.log.info(''.format(i=i, name=name))else:context.log.debug(''.format(i=i, name=name))return MultipleResults.from_dict({''.format(i): '' for i in range(n_outputs)})return solid_fn", "docstring": "Creates a solid with the given number of (meaningless) inputs and outputs.\n\n Config controls the behavior of the nonce solid.", "id": "f8629:m0"} {"signature": "def computeContribs(urls, rank):", "body": "num_urls = len(urls)for url in urls:yield (url, rank / num_urls)", "docstring": "Calculates URL contributions to the rank of other URLs.", "id": "f8640:m3"} {"signature": "def parseNeighbors(urls):", "body": "parts = 
re.split(r'', urls)return parts[], parts[]", "docstring": "Parses a urls pair string into urls pair.", "id": "f8640:m0"} {"signature": "def computeContribs(urls, rank):", "body": "num_urls = len(urls)for url in urls:yield (url, rank / num_urls)", "docstring": "Calculates URL contributions to the rank of other URLs.", "id": "f8641:m0"} {"signature": "def parseNeighbors(urls):", "body": "parts = re.split(r'', urls)return parts[], parts[]", "docstring": "Parses a urls pair string into urls pair.", "id": "f8642:m0"} {"signature": "def computeContribs(urls, rank):", "body": "num_urls = len(urls)for url in urls:yield (url, rank / num_urls)", "docstring": "Calculates URL contributions to the rank of other URLs.", "id": "f8644:m0"} {"signature": "def list(self):", "body": "return list(filter(lambda x: x.get('') != '', self._post(request=ApiActions.LIST.value,uri=ApiUri.ACTIONS.value,).get('')))", "docstring": "Get all current alerts\n\n:return: All alerts\n:rtype: list of dict\n\n:raises: This will raise a\n :class:`ServerException`\n if there is an error from Logentries", "id": "f8650:c6:m1"} {"signature": "def create(self,alert_config,occurrence_frequency_count=None,occurrence_frequency_unit=None,alert_frequency_count=None,alert_frequency_unit=None):", "body": "data = {'': occurrence_frequency_count or ,'': occurrence_frequency_unit or '','': alert_frequency_count or ,'': alert_frequency_unit or '','': [],'': True,}data.update(alert_config.args())return self._post(request=ApiActions.CREATE.value,uri=ApiUri.ACTIONS.value,params=data)", "docstring": "Create a new alert\n\n:param alert_config: A list of AlertConfig classes (Ex:\n ``[EmailAlertConfig('me@mydomain.com')]``)\n:type alert_config: list of\n :class:`PagerDutyAlertConfig`,\n :class:`WebHookAlertConfig`,\n :class:`EmailAlertConfig`,\n :class:`SlackAlertConfig`, or\n :class:`HipChatAlertConfig`\n\n:param occurrence_frequency_count: How many times per\n ``alert_frequency_unit`` for a match before issuing an alert.\n Defaults to 1\n:type occurrence_frequency_count: int\n\n:param occurrence_frequency_unit: The time period to monitor for sending\n an alert. Must be 'day', or 'hour'. Defaults to 'hour'\n:type occurrence_frequency_unit: str\n\n:param alert_frequency_count: How many times per\n ``alert_frequency_unit`` to issue an alert. Defaults to 1\n:type alert_frequency_count: int\n\n:param alert_frequency_unit: How often to regulate sending alerts.\n Must be 'day', or 'hour'. Defaults to 'hour'\n:type alert_frequency_unit: str\n\n:returns: The response of your post\n:rtype: dict\n\n:raises: This will raise a\n :class:`ServerException`\n if there is an error from Logentries", "id": "f8650:c6:m0"} {"signature": "def update(self, alert):", "body": "data = {'': alert[''],'': alert[''],'': alert[''],'': alert[''],'': alert[''],'': alert[''],'': alert[''],'': alert[''],'': alert[''],}return self._post(request=ApiActions.UPDATE.value,uri=ApiUri.ACTIONS.value,params=data)", "docstring": "Update an alert\n\n:param alert: The data to update. Must include keys:\n\n * id (str)\n * rate_count (int)\n * rate_range (str): 'day' or 'hour'\n * limit_count (int)\n * limit_range (str): 'day' or 'hour'\n * type (str)\n * schedule (list)\n * args (dict)\n:type alert: dict\n\nExample:\n\n.. 
code-block:: python\n\n Alert().update(\n alert={\n 'id': 'd9d4596e-49e4-4135-b3b3-847f9e7c1f43',\n 'args': {'direct': 'you@example.com'},\n 'rate_count': 1,\n 'rate_range': 'hour',\n 'limit_count': 1,\n 'limit_range': 'hour',\n 'schedule': [],\n 'enabled': True,\n 'type': 'mailto',\n }\n )\n\n:return:\n:rtype: dict", "id": "f8650:c6:m3"} {"signature": "def delete(self, id):", "body": "return self._post(request=ApiActions.DELETE.value,uri=ApiUri.ACTIONS.value,params={'': id})", "docstring": "Delete the specified alert\n\n:param id: the alert's ID\n:type id: str\n\n:raises: This will raise a\n :class:`ServerException`\n if there is an error from Logentries", "id": "f8650:c6:m4"} {"signature": "def list(self):", "body": "return self._post(request='',uri=ApiUri.TAGS.value,).get('')", "docstring": "Get all current labels\n\n:return: The Logentries API response\n:rtype: list of dict\n\n:raises: This will raise a\n :class:`ServerException`\n if there is an error from Logentries", "id": "f8650:c3:m1"} {"signature": "def delete(self, tag_id):", "body": "tag_url = ''self._api_delete(url=tag_url.format(account_id=self.account_id,tag_id=tag_id))", "docstring": "Delete the specified InactivityAlert\n\n:param tag_id: The tag ID to delete\n:type tag_id: str\n\n:raises: This will raise a\n :class:`ServerException `\n if there is an error from Logentries", "id": "f8658:c3:m1"} {"signature": "def create(self,name,query,scope_count,scope_unit,increase_positive,percentage_change,trigger_config,logs,alert_reports):", "body": "change = ''.format(pos='' if increase_positive else '',change=str(percentage_change))query_response = self._create_scheduled_query(query=query,change=change,scope_unit=scope_unit,scope_count=scope_count,)scheduled_query_id = query_response.get('', {}).get('')tag_data = {'': {'': [alert_report.to_dict()for alert_reportin alert_reports],'': name,'': scheduled_query_id,'': [{'': log}for login logs],'': '','': ''}}tag_data[''].update(trigger_config.to_dict())tag_url = ''.format(account_id=self.account_id)return self._api_post(url=tag_url,data=json.dumps(tag_data, sort_keys=True),)", "docstring": "Create an anomaly alert. This call makes 2 requests, one to create a\n\"scheduled_query\", and another to create the alert.\n\n:param name: The name for the alert\n:type name: str\n\n:param query: The `LEQL`_ query to use for detecting anomalies. Must\n result in a numerical value, so it should look something like\n ``where(...) calculate(COUNT)``\n:type query: str\n\n:param scope_count: How many ``scope_unit`` s to inspect for detecting\n an anomaly\n:type scope_count: int\n\n:param scope_unit: How far to look back in detecting an anomaly. Must\n be one of \"hour\", \"day\", or \"week\"\n:type scope_unit: str\n\n:param increase_positive: Detect a positive increase for the anomaly. A\n value of ``False`` results in detecting a decrease for the anomaly.\n:type increase_positive: bool\n\n:param percentage_change: The percentage of change to detect. Must be a\n number between 0 and 100 (inclusive).\n:type percentage_change: int\n\n:param trigger_config: A AlertTriggerConfig describing how far back to\n look back to compare to the anomaly scope.\n:type trigger_config: :class:`AlertTriggerConfig`\n\n:param logs: A list of log UUID's. 
(The 'key' key of a log)\n:type logs: list of str\n\n:param alert_reports: A list of AlertReportConfig to send alerts to\n:type alert_reports: list of\n :class:`AlertReportConfig`\n\n:return: The API response of the alert creation\n:rtype: dict\n\n:raises: This will raise a\n :class:`ServerException `\n if there is an error from Logentries\n\n.. _Leql: https://blog.logentries.com/2015/06/introducing-leql/", "id": "f8658:c4:m1"} {"signature": "def create(self, name, patterns, logs, trigger_config, alert_reports):", "body": "data = {'': {'': [alert_report.to_dict()for alert_reportin alert_reports],'': name,'': patterns,'': [{'': log}for login logs],'': '','': ''}}data[''].update(trigger_config.to_dict())return self._api_post(url=self.url_template.format(account_id=self.account_id),data=json.dumps(data, sort_keys=True))", "docstring": "Create an inactivity alert\n\n:param name: A name for the inactivity alert\n:type name: str\n\n:param patterns: A list of regexes to match\n:type patterns: list of str\n\n:param logs: A list of log UUID's. (The 'key' key of a log)\n:type logs: list of str\n\n:param trigger_config: A AlertTriggerConfig describing how far back to\n look for inactivity.\n:type trigger_config: :class:`AlertTriggerConfig`\n\n:param alert_reports: A list of AlertReportConfigs to send alerts to\n:type alert_reports: list of\n :class:`AlertReportConfig`\n\n:return: The API response\n:rtype: dict\n\n:raises: This will raise a\n :class:`ServerException`\n if there is an error from Logentries", "id": "f8658:c3:m0"} {"signature": "def _create_scheduled_query(self, query, change, scope_unit, scope_count):", "body": "query_data = {'': {'': '','': query,'': '','': change,'': scope_unit.title(),'': scope_count,}}query_url = ''return self._api_post(url=query_url.format(account_id=self.account_id),data=json.dumps(query_data, sort_keys=True))", "docstring": "Create the scheduled query", "id": "f8658:c4:m0"} {"signature": "def _post(self, request, uri, params=None):", "body": "request_data = {'': self.account_key,'': self.account_key,'': request,}request_data.update(params or {})response = requests.post(url=''.format(uri),headers=self.headers,data=json.dumps(request_data))if not response.ok:raise ServerException(''.format(response.status_code, response.text))return response.json()", "docstring": "A wrapper for posting things.\n\n:param request: The request type. Must be one of the\n :class:`ApiActions`\n:type request: str\n\n:param uri: The API endpoint to hit. 
Must be one of\n :class:`ApiUri`\n:type uri: str\n\n:param params: A dictionary of supplemental kw args\n:type params: dict\n\n:returns: The response of your post\n:rtype: dict\n\n:raises: This will raise a\n :class:`ServerException`\n if there is an error from Logentries", "id": "f8659:c2:m2"} {"signature": "def args(self):", "body": "return {'': {'': self.address,'': '','': ''},'': AlertTypes.EMAIL.value}", "docstring": ":rtype: dict", "id": "f8660:c3:m1"} {"signature": "def __init__(self, url):", "body": "super(WebHookAlertConfig, self).__init__(url=url)", "docstring": "Requires a 'url' parameter", "id": "f8660:c4:m0"} {"signature": "@abstractmethoddef args(self):", "body": "raise NotImplementedError", "docstring": "Must return a dictionary with an `args` for the alert, and a `type` key\nwith the type\n:rtype: dict", "id": "f8660:c1:m1"} {"signature": "def __init__(self, address):", "body": "super(EmailAlertConfig, self).__init__(address=address)", "docstring": "Requires an 'address' parameter", "id": "f8660:c3:m0"} {"signature": "def args(self):", "body": "return {'': {'': self.url,},'': AlertTypes.SLACK.value}", "docstring": ":rtype: dict", "id": "f8660:c5:m1"} {"signature": "def list(self):", "body": "response = requests.get(self.base_url)if not response.ok:raise ServerException(''.format(response.status_code, response.text))return {host.get(''): [log.get('')for login host.get('')]for hostin response.json().get('')}", "docstring": "Get all log sets\n\n:return: Returns a dictionary where the key is the hostname or log set,\n and the value is a list of the log keys\n:rtype: dict\n\n:raises: This will raise a\n :class:`ServerException`\n if there is an error from Logentries", "id": "f8662:c0:m1"} {"signature": "def register(name, impl):", "body": "_implementations[name] = impl", "docstring": "Register your own implementation,\n it also override registered implementation without any check.", "id": "f8676:m0"} {"signature": "def serializer(codec):", "body": "formats = {'': json,'': pickle,'': Storable}return formats[codec]", "docstring": "Create a serializer that support loads/dumps methods.\njson and pickle are fully supported.\nstorable support read only.", "id": "f8677:m0"} {"signature": "def save_session(self, request=None, response=None):", "body": "if not self._session_key:returnif self._session_data is None: self.client.delete(self._session_key)returnself.client.set(self._session_key, self._session_data)", "docstring": "Save the session in the key value store, in case a session\n has been found", "id": "f8679:c1:m2"} {"signature": "def update_session_token(self, header_name, value):", "body": "if self._session_key:self.client.delete(self._session_key)self._session_key = '' % (header_name, value)", "docstring": "Create a session from the givent header name", "id": "f8679:c1:m1"} {"signature": "def add_timestamp(logger_class, log_method, event_dict):", "body": "event_dict[''] = calendar.timegm(time.gmtime())return event_dict", "docstring": "Attach the event time, as unix epoch", "id": "f8687:m1"} {"signature": "def add_unique_id(logger_class, log_method, event):", "body": "event[''] = dna.utils.generate_unique_id()return event", "docstring": "Attach a unique id per event", "id": "f8687:m0"} {"signature": "def requires_basic_auth(resource):", "body": "@functools.wraps(resource)def decorated(*args, **kwargs):''''''auth = flask.request.authorizationuser = check_credentials(auth.username, auth.password)if not auth or user is None:log.warn('', credentials=auth)return 
auth_failed()log.info('', credentials=auth)flask.g.user = userreturn resource(*args, **kwargs)return decorated", "docstring": "Flask decorator protecting resources using username/password scheme", "id": "f8692:m3"} {"signature": "def auth_failed():", "body": "return flask.Response('''', ,{'': ''})", "docstring": "Sends a 401 response", "id": "f8692:m2"} {"signature": "def check_token(token):", "body": "user = models.User.objects(api_key=token).first()return user or None", "docstring": "Verify HTTP header token authentication", "id": "f8692:m1"} {"signature": "def api_url(full_version, resource):", "body": "return ''.format(dna.utils.Version(full_version).major, resource)", "docstring": ">>> # Harmonize api endpoints\n>>> # __version__ should be like major.minor.fix\n>>> from my_app import __version__\n>>> api_url(__version__, '/some/endpoint')\n/v0/some/endpoint", "id": "f8695:m0"} {"signature": "@cors.crossdomain(origin='')def get(self, worker_id):", "body": "code = if worker_id == '':report = {'': [{'': job,'': self._inspect_worker(job)}for job in self.jobs]}elif worker_id in self.jobs:report = {'': worker_id,'': self._inspect_worker(worker_id)}else:report = {'': ''.format(worker_id)}code = return flask.jsonify(report), code", "docstring": "Return status report", "id": "f8696:c0:m2"} {"signature": "def __get__(self, obj, objtype):", "body": "return functools.partial(self.__call__, obj)", "docstring": "Support instance methods.", "id": "f8699:c0:m3"} {"signature": "def __repr__(self):", "body": "return self.func.__doc__", "docstring": "Return the function's docstring.", "id": "f8699:c0:m2"} {"signature": "def dynamic_import(mod_path, obj_name=None):", "body": "try:module = __import__(mod_path, fromlist=[''])except ImportError as error:raise errors.DynamicImportFailed(module=''.join([mod_path, obj_name]), reason=error)importlib.reload(module)if obj_name is None:obj = moduleelif hasattr(module, obj_name):obj = getattr(module, obj_name)else:raise errors.DynamicImportFailed(module=''.join([mod_path, obj_name]),reason=''.format(module.__name__, obj_name))return Nonereturn obj", "docstring": "Take a string and return the corresponding module", "id": "f8703:m4"} {"signature": "def generate_random_name(size=, chars=string.ascii_lowercase + string.digits):", "body": "return ''.join(random.choice(chars) for _ in range(size))", "docstring": "Create a random name to assign to a node", "id": "f8703:m1"} {"signature": "def iter_osm_notes(feed_limit=, interval=, parse_timestamps=True):", "body": "last_seen_guid = Nonewhile True:u = urllib2.urlopen('' % feed_limit)tree = etree.parse(u)new_notes = []for note_item in tree.xpath(''):title = note_item.xpath('')[].textif title.startswith(''):action = ''elif title.startswith(''):action = ''elif title.startswith(''):action = ''guid = note_item.xpath('')[].textif last_seen_guid == guid:breakelif last_seen_guid == None:last_seen_guid = guidelse:note_id = int(guid.split('')[-].split('')[])new_notes.append((action, get_note(note_id, parse_timestamps)))for note in reversed(new_notes):yield noteyield model.Finished(None, None)time.sleep(interval)", "docstring": "Parses the global OSM Notes feed and yields as much Note information as possible.", "id": "f8714:m12"} {"signature": "def run_once(f):", "body": "@wraps(f)def wrapper(*args, **kwargs):if not wrapper.has_run:result = f(*args, **kwargs)wrapper.has_run = Truereturn resultwrapper.has_run = Falsereturn wrapper", "docstring": "Runs a function (successfully) only once.\n The running can be reset by setting the 
`has_run` attribute to False", "id": "f8732:m0"} {"signature": "def gcp_revoke_authentication(self):", "body": "self._validate_key_set()self.log.info(\"\")self.execute_cmd(['', '', '', '', ''.format(self.project_id)])self.execute_cmd(['', '', '', '', '', ''.format(self.project_id)])", "docstring": "Change default authentication to none - which is not existing one.", "id": "f8734:c0:m7"} {"signature": "def gcp_store_authentication(self):", "body": "self._validate_key_set()if not GcpAuthenticator.original_account:GcpAuthenticator.original_account = self.check_output(['', '', '', '', ''.format(self.project_id)]).decode('')self.log.info(\"\".format(GcpAuthenticator.original_account))", "docstring": "Store authentication as it was originally so it can be restored and revoke\nauthentication.", "id": "f8734:c0:m8"} {"signature": "def restart_cluster_endpoint(host):", "body": "return ''.format(host)", "docstring": "Utility function to generate the get run endpoint given the host.", "id": "f8769:m5"} {"signature": "def run_now_endpoint(host):", "body": "return ''.format(host)", "docstring": "Utility function to generate the run now endpoint given the host.", "id": "f8769:m0"} {"signature": "def get_after(sentinel, iterable):", "body": "truncated = dropwhile(lambda el: el != sentinel, iterable)next(truncated)return next(truncated)", "docstring": "Get the value after `sentinel` in an `iterable`", "id": "f8786:m0"} {"signature": "def a_function(arg_x, arg_y):", "body": "pass", "docstring": "A function with two args", "id": "f8973:m0"} {"signature": "def set_dags_paused_state(is_paused):", "body": "session = settings.Session()dms = session.query(DagModel).filter(DagModel.dag_id.in_(DAG_IDS))for dm in dms:logging.info(''.format(dm, is_paused))dm.is_paused = is_pausedsession.commit()", "docstring": "Toggle the pause state of the DAGs in the test.", "id": "f9102:m2"} {"signature": "def get_components(principal):", "body": "if not principal:return Nonereturn re.split(r'', str(principal))", "docstring": "get_components(principal) -> (short name, instance (FQDN), realm)\n\n``principal`` is the kerberos principal to parse.", "id": "f9107:m0"} {"signature": "def add_volume(self, volume):", "body": "self._add_volume(name=volume.name, configs=volume.configs)", "docstring": "Args:\n volume (Volume):", "id": "f9110:c0:m3"} {"signature": "def _add_mount(self,name,mount_path,sub_path,read_only):", "body": "self.volume_mounts.append({'': name,'': mount_path,'': sub_path,'': read_only})", "docstring": "Args:\n name (str):\n mount_path (str):\n sub_path (str):\n read_only:\n\nReturns:", "id": "f9110:c0:m6"} {"signature": "def run_pod(self, pod, startup_timeout=, get_logs=True):", "body": "resp = self.run_pod_async(pod)curr_time = dt.now()if resp.status.start_time is None:while self.pod_not_started(pod):delta = dt.now() - curr_timeif delta.seconds >= startup_timeout:raise AirflowException(\"\")time.sleep()self.log.debug('')return self._monitor_pod(pod, get_logs)", "docstring": "Launches the pod synchronously and waits for completion.\nArgs:\n pod (Pod):\n startup_timeout (int): Timeout for startup of the pod (if pod is pending for\n too long, considers task a failure", "id": "f9111:c1:m3"} {"signature": "def _get_init_containers(self):", "body": "if self.kube_config.dags_volume_claim orself.kube_config.dags_volume_host or self.kube_config.dags_in_image:return []init_environment = [{'': '','': self.kube_config.git_repo}, {'': '','': self.kube_config.git_branch}, {'': '','': self.kube_config.git_sync_root}, {'': '','': 
self.kube_config.git_sync_dest}, {'': '','': ''}, {'': '','': ''}]if self.kube_config.git_user:init_environment.append({'': '','': self.kube_config.git_user})if self.kube_config.git_password:init_environment.append({'': '','': self.kube_config.git_password})volume_mounts = [{'': self.kube_config.git_sync_root,'': self.dags_volume_name,'': False}]if self.kube_config.git_ssh_key_secret_name:volume_mounts.append({'': self.git_sync_ssh_secret_volume_name,'': '','': ''})init_environment.extend([{'': '','': ''},{'': '','': ''}])if self.kube_config.git_ssh_known_hosts_configmap_name:volume_mounts.append({'': self.git_sync_ssh_known_hosts_volume_name,'': '','': ''})init_environment.extend([{'': '','': ''},{'': '','': ''}])else:init_environment.append({'': '','': ''})return [{'': self.kube_config.git_sync_init_container_name,'': self.kube_config.git_sync_container,'': {'': }, '': init_environment,'': volume_mounts}]", "docstring": "When using git to retrieve the DAGs, use the GitSync Init Container", "id": "f9114:c0:m1"} {"signature": "def validate(self, body_to_validate):", "body": "try:for validation_spec in self._validation_specs:self._validate_field(validation_spec=validation_spec,dictionary_to_validate=body_to_validate)except GcpFieldValidationException as e:raise GcpFieldValidationException(\"\".format(body_to_validate, e))all_field_names = [spec[''] for spec in self._validation_specsif spec.get('') != '' andspec.get('') != self._api_version]all_union_fields = [spec for spec in self._validation_specsif spec.get('') == '']for union_field in all_union_fields:all_field_names.extend([nested_union_spec[''] for nested_union_spec in union_field['']if nested_union_spec.get('') != '' andnested_union_spec.get('') != self._api_version])for field_name in body_to_validate.keys():if field_name not in all_field_names:self.log.warning(\"\"\"\"\"\"\"\"\"\"\"\",field_name, self._validation_specs)", "docstring": "Validates if the body (dictionary) follows specification that the validator was\ninstantiated with. Raises ValidationSpecificationException or\nValidationFieldException in case of problems with specification or the\nbody not conforming to the specification respectively.\n\n:param body_to_validate: body that must follow the specification\n:type body_to_validate: dict\n:return: None", "id": "f9120:c2:m8"} {"signature": "def check_tuning_config(self, tuning_config):", "body": "for channel in tuning_config['']['']:self.check_s3_url(channel[''][''][''])", "docstring": "Check if a tuning configuration is valid\n\n:param tuning_config: tuning_config\n:type tuning_config: dict\n:return: None", "id": "f9127:c1:m5"} {"signature": "def multi_stream_iter(self, log_group, streams, positions=None):", "body": "positions = positions or {s: Position(timestamp=, skip=) for s in streams}event_iters = [self.log_stream(log_group, s, positions[s].timestamp, positions[s].skip)for s in streams]events = []for s in event_iters:if not s:events.append(None)continuetry:events.append(next(s))except StopIteration:events.append(None)while any(events):i = argmin(events, lambda x: x[''] if x else )yield (i, events[i])try:events[i] = next(event_iters[i])except StopIteration:events[i] = None", "docstring": "Iterate over the available events coming from a set of log streams in a single log group\ninterleaving the events from each stream so they're yielded in timestamp order.\n\n:param log_group: The name of the log group.\n:type log_group: str\n:param streams: A list of the log stream names. 
The position of the stream in this list is\n the stream number.\n:type streams: list\n:param positions: A list of pairs of (timestamp, skip) which represents the last record\n read from each stream.\n:type positions: list\n:return: A tuple of (stream number, cloudwatch log event).", "id": "f9127:c1:m9"} {"signature": "def check_status(self, job_name, key,describe_function, check_interval,max_ingestion_time,non_terminal_states=None):", "body": "if not non_terminal_states:non_terminal_states = self.non_terminal_statessec = running = Truewhile running:time.sleep(check_interval)sec = sec + check_intervaltry:response = describe_function(job_name)status = response[key]self.log.info('''' % (sec, status))except KeyError:raise AirflowException('')except ClientError:raise AirflowException('')if status in non_terminal_states:running = Trueelif status in self.failed_states:raise AirflowException('' % response[''])else:running = Falseif max_ingestion_time and sec > max_ingestion_time:raise AirflowException('', max_ingestion_time)self.log.info('')response = describe_function(job_name)return response", "docstring": "Check status of a SageMaker job\n\n:param job_name: name of the job to check status\n:type job_name: str\n:param key: the key of the response dict\n that points to the state\n:type key: str\n:param describe_function: the function used to retrieve the status\n:type describe_function: python callable\n:param args: the arguments for the function\n:param check_interval: the time interval in seconds which the operator\n will check the status of any SageMaker job\n:type check_interval: int\n:param max_ingestion_time: the maximum ingestion time in seconds. Any\n SageMaker jobs that run longer than this will fail. Setting this to\n None implies no timeout for any SageMaker job.\n:type max_ingestion_time: int\n:param non_terminal_states: the set of nonterminal states\n:type non_terminal_states: set\n:return: response of describe call after job is done", "id": "f9127:c1:m24"} {"signature": "def create_tuning_job(self, config, wait_for_completion=True,check_interval=, max_ingestion_time=None):", "body": "self.check_tuning_config(config)response = self.get_conn().create_hyper_parameter_tuning_job(**config)if wait_for_completion:self.check_status(config[''],'',self.describe_tuning_job,check_interval, max_ingestion_time)return response", "docstring": "Create a tuning job\n\n:param config: the config for tuning\n:type config: dict\n:param wait_for_completion: if the program should keep running until job finishes\n:type wait_for_completion: bool\n:param check_interval: the time interval in seconds which the operator\n will check the status of any SageMaker job\n:type check_interval: int\n:param max_ingestion_time: the maximum ingestion time in seconds. Any\n SageMaker jobs that run longer than this will fail. 
Setting this to\n None implies no timeout for any SageMaker job.\n:type max_ingestion_time: int\n:return: A response to tuning job creation", "id": "f9127:c1:m11"} {"signature": "def describe_endpoint_config(self, name):", "body": "return self.get_conn().describe_endpoint_config(EndpointConfigName=name)", "docstring": "Return the endpoint config info associated with the name\n\n:param name: the name of the endpoint config\n:type name: string\n:return: A dict contains all the endpoint config info", "id": "f9127:c1:m22"} {"signature": "def create_cluster(self, cluster, project_id=None, retry=DEFAULT, timeout=DEFAULT):", "body": "if isinstance(cluster, dict):cluster_proto = Cluster()cluster = self._dict_to_proto(py_dict=cluster, proto=cluster_proto)elif not isinstance(cluster, Cluster):raise AirflowException(\"\")self._append_label(cluster, '', '' + version.version)self.log.info(\"\",self.project_id, self.location, cluster.name)try:op = self.get_client().create_cluster(project_id=project_id or self.project_id,zone=self.location,cluster=cluster,retry=retry,timeout=timeout)op = self.wait_for_operation(op)return op.target_linkexcept AlreadyExists as error:self.log.info('', error.message)return self.get_cluster(name=cluster.name).self_link", "docstring": "Creates a cluster, consisting of the specified number and type of Google Compute\nEngine instances.\n\n:param cluster: A Cluster protobuf or dict. If dict is provided, it must\n be of the same form as the protobuf message\n :class:`google.cloud.container_v1.types.Cluster`\n:type cluster: dict or google.cloud.container_v1.types.Cluster\n:param project_id: Google Cloud Platform project ID\n:type project_id: str\n:param retry: A retry object (``google.api_core.retry.Retry``) used to\n retry requests.\n If None is specified, requests will not be retried.\n:type retry: google.api_core.retry.Retry\n:param timeout: The amount of time, in seconds, to wait for the request to\n complete. 
Note that if retry is specified, the timeout applies to each\n individual attempt.\n:type timeout: float\n:return: The full url to the new, or existing, cluster\n:raises:\n ParseError: On JSON parsing problems when trying to convert dict\n AirflowException: cluster is not dict type nor Cluster proto type", "id": "f9128:c0:m7"} {"signature": "def delete_subscription(self, project, subscription,fail_if_not_exists=False):", "body": "service = self.get_conn()full_subscription = _format_subscription(project, subscription)try:service.projects().subscriptions().delete(subscription=full_subscription).execute(num_retries=self.num_retries)except HttpError as e:if str(e.resp['']) == '':message = ''.format(full_subscription)self.log.warning(message)if fail_if_not_exists:raise PubSubException(message)else:raise PubSubException(''.format(full_subscription),e)", "docstring": "Deletes a Pub/Sub subscription, if it exists.\n\n :param project: the GCP project ID where the subscription exists\n :type project: str\n :param subscription: the Pub/Sub subscription name to delete; do not\n include the ``projects/{project}/subscriptions/`` prefix.\n :type subscription: str\n :param fail_if_not_exists: if set, raise an exception if the topic\n does not exist\n :type fail_if_not_exists: bool", "id": "f9129:c1:m6"} {"signature": "def publish(self, project, topic, messages):", "body": "body = {'': messages}full_topic = _format_topic(project, topic)request = self.get_conn().projects().topics().publish(topic=full_topic, body=body)try:request.execute(num_retries=self.num_retries)except HttpError as e:raise PubSubException(''.format(full_topic), e)", "docstring": "Publishes messages to a Pub/Sub topic.\n\n :param project: the GCP project ID in which to publish\n :type project: str\n :param topic: the Pub/Sub topic to which to publish; do not\n include the ``projects/{project}/topics/`` prefix.\n :type topic: str\n :param messages: messages to publish; if the data field in a\n message is set, it should already be base64 encoded.\n :type messages: list of PubSub messages; see\n http://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage", "id": "f9129:c1:m2"} {"signature": "def acknowledge(self, project, subscription, ack_ids):", "body": "service = self.get_conn()full_subscription = _format_subscription(project, subscription)try:service.projects().subscriptions().acknowledge(subscription=full_subscription, body={'': ack_ids}).execute(num_retries=self.num_retries)except HttpError as e:raise PubSubException(''.format(len(ack_ids), full_subscription), e)", "docstring": "Pulls up to ``max_messages`` messages from Pub/Sub subscription.\n\n :param project: the GCP project name or ID in which to create\n the topic\n :type project: str\n :param subscription: the Pub/Sub subscription name to delete; do not\n include the 'projects/{project}/topics/' prefix.\n :type subscription: str\n :param ack_ids: List of ReceivedMessage ackIds from a previous pull\n response\n :type ack_ids: list", "id": "f9129:c1:m8"} {"signature": "def get_conn(self):", "body": "http_authorized = self._authorize()return build('', '', http=http_authorized, cache_discovery=False)", "docstring": "Returns a Pub/Sub service object.\n\n :rtype: googleapiclient.discovery.Resource", "id": "f9129:c1:m1"} {"signature": "def _get_project_id(self, project_id):", "body": "return project_id if project_id else self.project_id", "docstring": "In case project_id is None, overrides it with default project_id from\nthe service account that is authorized.\n\n:param 
project_id: project id to\n:type project_id: str\n:return: the project_id specified or default project id if project_id is None", "id": "f9130:c0:m8"} {"signature": "def _authorize(self):", "body": "credentials = self._get_credentials()http = httplib2.Http()authed_http = google_auth_httplib2.AuthorizedHttp(credentials, http=http)return authed_http", "docstring": "Returns an authorized HTTP object to be used to build a Google cloud\nservice hook connection.", "id": "f9130:c0:m3"} {"signature": "def get_conn(self):", "body": "conn_config = self._get_conn_params()conn = snowflake.connector.connect(**conn_config)return conn", "docstring": "Returns a snowflake.connection object", "id": "f9131:c0:m3"} {"signature": "def _resolve_should_track_driver_status(self):", "body": "return ('' in self._connection[''] andself._connection[''] == '')", "docstring": "Determines whether or not this hook should poll the spark driver status through\nsubsequent spark-submit status requests after the initial spark-submit request\n:return: if the driver status should be tracked", "id": "f9133:c0:m1"} {"signature": "def _start_driver_status_tracking(self):", "body": "missed_job_status_reports = max_missed_job_status_reports = while self._driver_status not in [\"\", \"\",\"\", \"\", \"\"]:time.sleep()self.log.debug(\"\".format(self._driver_id))poll_drive_status_cmd = self._build_track_driver_status_command()status_process = subprocess.Popen(poll_drive_status_cmd,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,bufsize=-,universal_newlines=True)self._process_spark_status_log(iter(status_process.stdout.readline, ''))returncode = status_process.wait()if returncode:if missed_job_status_reports < max_missed_job_status_reports:missed_job_status_reports = missed_job_status_reports + else:raise AirflowException(\"\".format(max_missed_job_status_reports, returncode))", "docstring": "Polls the driver based on self._driver_id to get the status.\nFinish successfully when the status is FINISHED.\nFinish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.\n\nPossible status:\n\nSUBMITTED\n Submitted but not yet scheduled on a worker\nRUNNING\n Has been allocated to a worker to run\nFINISHED\n Previously ran and exited cleanly\nRELAUNCHING\n Exited non-zero or due to worker failure, but has not yet\n started running again\nUNKNOWN\n The status of the driver is temporarily not known due to\n master failure recovery\nKILLED\n A user manually killed this driver\nFAILED\n The driver exited non-zero and was not supervised\nERROR\n Unable to run or restart due to an unrecoverable error\n (e.g. 
missing jar file)", "id": "f9133:c0:m10"} {"signature": "def _process_spark_submit_log(self, itr):", "body": "for line in itr:line = line.strip()if self._is_yarn and self._connection[''] == '':match = re.search('', line)if match:self._yarn_application_id = match.groups()[]self.log.info(\"\",self._yarn_application_id)elif self._is_kubernetes:match = re.search(r'', line)if match:self._kubernetes_driver_pod = match.groups()[]self.log.info(\"\",self._kubernetes_driver_pod)match_exit_code = re.search(r'', line)if match_exit_code:self._spark_exit_code = int(match_exit_code.groups()[])elif self._should_track_driver_status and not self._driver_id:match_driver_id = re.search(r'', line)if match_driver_id:self._driver_id = match_driver_id.groups()[]self.log.info(\"\".format(self._driver_id))else:self.log.info(line)self.log.debug(\"\".format(line))", "docstring": "Processes the log files and extracts useful information out of it.\n\nIf the deploy-mode is 'client', log the output of the submit command as those\nare the output logs of the Spark worker directly.\n\nRemark: If the driver needs to be tracked for its status, the log-level of the\nspark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)\n\n:param itr: An iterator which iterates over the input of the subprocess", "id": "f9133:c0:m8"} {"signature": "def _build_spark_submit_command(self, application):", "body": "connection_cmd = self._get_spark_binary_path()connection_cmd += [\"\", self._connection['']]if self._conf:for key in self._conf:connection_cmd += [\"\", \"\".format(key, str(self._conf[key]))]if self._env_vars and (self._is_kubernetes or self._is_yarn):if self._is_yarn:tmpl = \"\"else:tmpl = \"\"for key in self._env_vars:connection_cmd += [\"\",tmpl.format(key, str(self._env_vars[key]))]elif self._env_vars and self._connection[''] != \"\":self._env = self._env_vars elif self._env_vars and self._connection[''] == \"\":raise AirflowException(\"\")if self._is_kubernetes:connection_cmd += [\"\", \"\".format(self._connection[''])]if self._files:connection_cmd += [\"\", self._files]if self._py_files:connection_cmd += [\"\", self._py_files]if self._archives:connection_cmd += [\"\", self._archives]if self._driver_class_path:connection_cmd += [\"\", self._driver_class_path]if self._jars:connection_cmd += [\"\", self._jars]if self._packages:connection_cmd += [\"\", self._packages]if self._exclude_packages:connection_cmd += [\"\", self._exclude_packages]if self._repositories:connection_cmd += [\"\", self._repositories]if self._num_executors:connection_cmd += [\"\", str(self._num_executors)]if self._total_executor_cores:connection_cmd += [\"\", str(self._total_executor_cores)]if self._executor_cores:connection_cmd += [\"\", str(self._executor_cores)]if self._executor_memory:connection_cmd += [\"\", self._executor_memory]if self._driver_memory:connection_cmd += [\"\", self._driver_memory]if self._keytab:connection_cmd += [\"\", self._keytab]if self._principal:connection_cmd += [\"\", self._principal]if self._name:connection_cmd += [\"\", self._name]if self._java_class:connection_cmd += [\"\", self._java_class]if self._verbose:connection_cmd += [\"\"]if self._connection['']:connection_cmd += [\"\", self._connection['']]if self._connection['']:connection_cmd += [\"\", self._connection['']]connection_cmd += [application]if self._application_args:connection_cmd += self._application_argsself.log.info(\"\", connection_cmd)return connection_cmd", "docstring": "Construct the spark-submit command to execute.\n:param application: 
command to append to the spark-submit command\n:type application: str\n:return: full command to be executed", "id": "f9133:c0:m5"} {"signature": "def get_conn(self):", "body": "self.log.debug('', self.ssh_conn_id)client = paramiko.SSHClient()if not self.allow_host_key_change:self.log.warning('''')client.load_system_host_keys()if self.no_host_key_check:self.log.warning('''')client.set_missing_host_key_policy(paramiko.AutoAddPolicy())if self.password and self.password.strip():client.connect(hostname=self.remote_host,username=self.username,password=self.password,key_filename=self.key_file,timeout=self.timeout,compress=self.compress,port=self.port,sock=self.host_proxy)else:client.connect(hostname=self.remote_host,username=self.username,key_filename=self.key_file,timeout=self.timeout,compress=self.compress,port=self.port,sock=self.host_proxy)if self.keepalive_interval:client.get_transport().set_keepalive(self.keepalive_interval)self.client = clientreturn client", "docstring": "Opens a ssh connection to the remote host.\n\n:rtype: paramiko.client.SSHClient", "id": "f9137:c0:m1"} {"signature": "def encrypt(self, key_name, plaintext, authenticated_data=None):", "body": "keys = self.get_conn().projects().locations().keyRings().cryptoKeys()body = {'': _b64encode(plaintext)}if authenticated_data:body[''] = _b64encode(authenticated_data)request = keys.encrypt(name=key_name, body=body)response = request.execute(num_retries=self.num_retries)ciphertext = response['']return ciphertext", "docstring": "Encrypts a plaintext message using Google Cloud KMS.\n\n:param key_name: The Resource Name for the key (or key version)\n to be used for encyption. Of the form\n ``projects/*/locations/*/keyRings/*/cryptoKeys/**``\n:type key_name: str\n:param plaintext: The message to be encrypted.\n:type plaintext: bytes\n:param authenticated_data: Optional additional authenticated data that\n must also be provided to decrypt the message.\n:type authenticated_data: bytes\n:return: The base 64 encoded ciphertext of the original message.\n:rtype: str", "id": "f9138:c0:m2"} {"signature": "def _b64decode(s):", "body": "return base64.b64decode(s.encode(''))", "docstring": "Base 64 decodes a string to bytes.", "id": "f9138:m1"} {"signature": "def _b64encode(s):", "body": "return base64.b64encode(s).decode('')", "docstring": "Base 64 encodes a bytes object to a string", "id": "f9138:m0"} {"signature": "def kill(self, ti):", "body": "if self.cmd is None:if not ti and not self.task_instance:raise Exception(\"\")elif not ti:ti = self.task_instancecmd_id = ti.xcom_pull(key=\"\", task_ids=ti.task_id)self.cmd = self.cls.find(cmd_id)if self.cls and self.cmd:self.log.info('', self.cmd.id)self.cmd.cancel()", "docstring": "Kill (cancel) a Qubole command\n:param ti: Task Instance of the dag, used to determine the Quboles command id\n:return: response from Qubole", "id": "f9139:c0:m3"} {"signature": "def execute(self):", "body": "proxies = {}if self.proxy:proxies = {'': self.proxy}slack_message = self._build_slack_message()self.run(endpoint=self.webhook_token,data=slack_message,headers={'': ''},extra_options={'': proxies})", "docstring": "Remote Popen (actually execute the slack webhook call)", "id": "f9140:c0:m3"} {"signature": "def upsert_document(self, document, database_name=None, collection_name=None, document_id=None):", "body": "if document_id is None:document_id = str(uuid.uuid4())if document is None:raise AirflowBadRequest(\"\")if '' in document:if document[''] is None:document[''] = document_idelse:document[''] = 
document_idcreated_document = self.get_conn().CreateItem(get_collection_link(self.__get_database_name(database_name),self.__get_collection_name(collection_name)),document)return created_document", "docstring": "Inserts a new document (or updates an existing one) into an existing\ncollection in the CosmosDB database.", "id": "f9141:c0:m10"} {"signature": "def delete_document(self, document_id, database_name=None, collection_name=None):", "body": "if document_id is None:raise AirflowBadRequest(\"\")self.get_conn().DeleteItem(get_document_link(self.__get_database_name(database_name),self.__get_collection_name(collection_name),document_id))", "docstring": "Delete an existing document out of a collection in the CosmosDB database.", "id": "f9141:c0:m12"} {"signature": "def delete_collection(self, collection_name, database_name=None):", "body": "if collection_name is None:raise AirflowBadRequest(\"\")self.get_conn().DeleteContainer(get_collection_link(self.__get_database_name(database_name), collection_name))", "docstring": "Deletes an existing collection in the CosmosDB database.", "id": "f9141:c0:m9"} {"signature": "def get_file(self, file_path, container_name, blob_name, **kwargs):", "body": "return self.connection.get_blob_to_path(container_name, blob_name,file_path, **kwargs)", "docstring": "Download a file from Azure Blob Storage.\n\n:param file_path: Path to the file to download.\n:type file_path: str\n:param container_name: Name of the container.\n:type container_name: str\n:param blob_name: Name of the blob.\n:type blob_name: str\n:param kwargs: Optional keyword arguments that\n `BlockBlobService.create_blob_from_path()` takes.\n:type kwargs: object", "id": "f9143:c0:m6"} {"signature": "@GoogleCloudBaseHook.catch_http_exceptiondef cancel_transfer_operation(self, operation_name):", "body": "self.get_conn().transferOperations().cancel(name=operation_name).execute(num_retries=self.num_retries)", "docstring": "Cancels an transfer operation in Google Storage Transfer Service.\n\n:param operation_name: Name of the transfer operation.\n:type operation_name: str\n:rtype: None", "id": "f9144:c2:m7"} {"signature": "def list_transfer_job(self, filter):", "body": "conn = self.get_conn()filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID)request = conn.transferJobs().list(filter=json.dumps(filter))jobs = []while request is not None:response = request.execute(num_retries=self.num_retries)jobs.extend(response[TRANSFER_JOBS])request = conn.transferJobs().list_next(previous_request=request, previous_response=response)return jobs", "docstring": "Lists long-running operations in Google Storage Transfer\nService that match the specified filter.\n\n:param filter: (Required) A request filter, as described in\n https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/list#body.QUERY_PARAMETERS.filter\n:type filter: dict\n:return: List of Transfer Jobs\n:rtype: list[dict]", "id": "f9144:c2:m4"} {"signature": "@GoogleCloudBaseHook.catch_http_exceptiondef update_transfer_job(self, job_name, body):", "body": "body = self._inject_project_id(body, BODY, PROJECT_ID)return (self.get_conn().transferJobs().patch(jobName=job_name, body=body).execute(num_retries=self.num_retries))", "docstring": "Updates a transfer job that runs periodically.\n\n:param job_name: (Required) Name of the job to be updated\n:type job_name: str\n:param body: A request body, as described in\n https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferJobs/patch#request-body\n:type body: 
dict\n:return: If successful, TransferJob.\n:rtype: dict", "id": "f9144:c2:m5"} {"signature": "def start_proxy(self):", "body": "self._download_sql_proxy_if_needed()if self.sql_proxy_process:raise AirflowException(\"\".format(self.sql_proxy_process))else:command_to_run = [self.sql_proxy_path]command_to_run.extend(self.command_line_parameters)try:self.log.info(\"\",self.cloud_sql_proxy_socket_directory)os.makedirs(self.cloud_sql_proxy_socket_directory)except OSError:passcommand_to_run.extend(self._get_credential_parameters())self.log.info(\"\", \"\".join(command_to_run))self.sql_proxy_process = Popen(command_to_run,stdin=PIPE, stdout=PIPE, stderr=PIPE)self.log.info(\"\", self.sql_proxy_process.pid)while True:line = self.sql_proxy_process.stderr.readline().decode('')return_code = self.sql_proxy_process.poll()if line == '' and return_code is not None:self.sql_proxy_process = Noneraise AirflowException(\"\".format(return_code))if line != '':self.log.info(line)if \"\" in line or \"\" in line:self.stop_proxy()raise AirflowException(\"\".format(line))if \"\" in line:return", "docstring": "Starts Cloud SQL Proxy.\n\nYou have to remember to stop the proxy if you started it!", "id": "f9146:c2:m5"} {"signature": "@GoogleCloudBaseHook.fallback_to_default_project_iddef get_instance(self, instance, project_id=None):", "body": "return self.get_conn().instances().get(project=project_id,instance=instance).execute(num_retries=self.num_retries)", "docstring": "Retrieves a resource containing information about a Cloud SQL instance.\n\n:param instance: Database instance ID. This does not include the project ID.\n:type instance: str\n:param project_id: Project ID of the project that contains the instance. If set\n to None or missing, the default project_id from the GCP connection is used.\n:type project_id: str\n:return: A Cloud SQL instance resource.\n:rtype: dict", "id": "f9146:c1:m2"} {"signature": "@provide_sessiondef create_connection(self, session=None):", "body": "connection = Connection(conn_id=self.db_conn_id)uri = self._generate_connection_uri()self.log.info(\"\", self.db_conn_id)connection.parse_from_uri(uri)session.add(connection)session.commit()", "docstring": "Create connection in the Connection table, according to whether it uses\nproxy, TCP, UNIX sockets, SSL. 
Connection ID will be randomly generated.\n\n:param session: Session of the SQL Alchemy ORM (automatically generated with\n decorator).", "id": "f9146:c3:m11"} {"signature": "def _wait_for_operation_to_complete(self, project_id, operation_name):", "body": "service = self.get_conn()while True:operation_response = service.operations().get(project=project_id,operation=operation_name,).execute(num_retries=self.num_retries)if operation_response.get(\"\") == CloudSqlOperationStatus.DONE:error = operation_response.get(\"\")if error:error_msg = str(error.get(\"\"))[:-]raise AirflowException(error_msg)returntime.sleep(TIME_TO_SLEEP_IN_SECONDS)", "docstring": "Waits for the named operation to complete - checks status of the\nasynchronous call.\n\n:param project_id: Project ID of the project that contains the instance.\n:type project_id: str\n:param operation_name: Name of the operation.\n:type operation_name: str\n:return: None", "id": "f9146:c1:m12"} {"signature": "def check_for_directory(self, share_name, directory_name, **kwargs):", "body": "return self.connection.exists(share_name, directory_name,**kwargs)", "docstring": "Check if a directory exists on Azure File Share.\n\n:param share_name: Name of the share.\n:type share_name: str\n:param directory_name: Name of the directory.\n:type directory_name: str\n:param kwargs: Optional keyword arguments that\n `FileService.exists()` takes.\n:type kwargs: object\n:return: True if the file exists, False otherwise.\n:rtype: bool", "id": "f9148:c0:m2"} {"signature": "def list_directories_and_files(self, share_name, directory_name=None, **kwargs):", "body": "return self.connection.list_directories_and_files(share_name,directory_name,**kwargs)", "docstring": "Return the list of directories and files stored on a Azure File Share.\n\n:param share_name: Name of the share.\n:type share_name: str\n:param directory_name: Name of the directory.\n:type directory_name: str\n:param kwargs: Optional keyword arguments that\n `FileService.list_directories_and_files()` takes.\n:type kwargs: object\n:return: A list of files and directories\n:rtype: list", "id": "f9148:c0:m4"} {"signature": "def load_file(self, file_path, share_name, directory_name, file_name, **kwargs):", "body": "self.connection.create_file_from_path(share_name, directory_name,file_name, file_path, **kwargs)", "docstring": "Upload a file to Azure File Share.\n\n:param file_path: Path to the file to load.\n:type file_path: str\n:param share_name: Name of the share.\n:type share_name: str\n:param directory_name: Name of the directory.\n:type directory_name: str\n:param file_name: Name of the file.\n:type file_name: str\n:param kwargs: Optional keyword arguments that\n `FileService.create_file_from_path()` takes.\n:type kwargs: object", "id": "f9148:c0:m8"} {"signature": "def load_stream(self, stream, share_name, directory_name, file_name, count, **kwargs):", "body": "self.connection.create_file_from_stream(share_name, directory_name,file_name, stream, count, **kwargs)", "docstring": "Upload a stream to Azure File Share.\n\n:param stream: Opened file/stream to upload as the file content.\n:type stream: file-like\n:param share_name: Name of the share.\n:type share_name: str\n:param directory_name: Name of the directory.\n:type directory_name: str\n:param file_name: Name of the file.\n:type file_name: str\n:param count: Size of the stream in bytes\n:type count: int\n:param kwargs: Optional keyword arguments that\n `FileService.create_file_from_stream()` takes.\n:type kwargs: object", "id": "f9148:c0:m10"} 
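A minimal usage sketch for the Azure File Share hook methods documented in the surrounding f9148 records (check_for_directory, load_file, list_directories_and_files, get_conn). Only the method signatures come from those records; the import path, hook class name, constructor argument and connection id below are assumptions.

from airflow.contrib.hooks.azure_fileshare_hook import AzureFileShareHook  # assumed import path

# Assumed connection id; the hook resolves it through get_conn() as documented in the records.
hook = AzureFileShareHook('azure_fileshare_default')
if hook.check_for_directory(share_name='myshare', directory_name='reports'):
    # Upload a local file, then list what the share directory now contains.
    hook.load_file(file_path='/tmp/report.csv', share_name='myshare',
                   directory_name='reports', file_name='report.csv')
    print(hook.list_directories_and_files(share_name='myshare', directory_name='reports'))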
{"signature": "def get_conn(self):", "body": "conn = self.get_connection(self.conn_id)service_options = conn.extra_dejsonreturn FileService(account_name=conn.login,account_key=conn.password, **service_options)", "docstring": "Return the FileService object.", "id": "f9148:c0:m1"} {"signature": "def list_versions(self, project_id, model_name):", "body": "result = []full_parent_name = ''.format(project_id, model_name)request = self._mlengine.projects().models().versions().list(parent=full_parent_name, pageSize=)response = request.execute()next_page_token = response.get('', None)result.extend(response.get('', []))while next_page_token is not None:next_request = self._mlengine.projects().models().versions().list(parent=full_parent_name,pageToken=next_page_token,pageSize=)response = next_request.execute()next_page_token = response.get('', None)result.extend(response.get('', []))time.sleep()return result", "docstring": "Lists all available versions of a model. Blocks until finished.", "id": "f9151:c0:m7"} {"signature": "def create_job(self, project_id, job, use_existing_job_fn=None):", "body": "request = self._mlengine.projects().jobs().create(parent=''.format(project_id),body=job)job_id = job['']try:request.execute()except HttpError as e:if e.resp.status == :if use_existing_job_fn is not None:existing_job = self._get_job(project_id, job_id)if not use_existing_job_fn(existing_job):self.log.error('''',job_id, existing_job)raiseself.log.info('',job_id)else:self.log.error(''.format(e))raisereturn self._wait_for_job_done(project_id, job_id)", "docstring": "Launches an MLEngine job and waits for it to reach a terminal state.\n\n:param project_id: The Google Cloud project id within which MLEngine\n job will be launched.\n:type project_id: str\n\n:param job: MLEngine Job object that should be provided to the MLEngine\n API, such as: ::\n\n {\n 'jobId': 'my_job_id',\n 'trainingInput': {\n 'scaleTier': 'STANDARD_1',\n ...\n }\n }\n\n:type job: dict\n\n:param use_existing_job_fn: In case an MLEngine job with the same\n job_id already exists, this method (if provided) will decide whether\n we should use this existing job, continue waiting for it to finish\n and return the job object. It should accept an MLEngine job\n object and return a boolean value indicating whether it is OK to\n reuse the existing job. 
If 'use_existing_job_fn' is not provided,\n we by default reuse the existing MLEngine job.\n:type use_existing_job_fn: function\n\n:return: The MLEngine job object if the job successfully reach a\n terminal state (which might be FAILED or CANCELLED state).\n:rtype: dict", "id": "f9151:c0:m2"} {"signature": "def create_version(self, project_id, model_name, version_spec):", "body": "parent_name = ''.format(project_id, model_name)create_request = self._mlengine.projects().models().versions().create(parent=parent_name, body=version_spec)response = create_request.execute()get_request = self._mlengine.projects().operations().get(name=response[''])return _poll_with_exponential_delay(request=get_request,max_n=,is_done_func=lambda resp: resp.get('', False),is_error_func=lambda resp: resp.get('', None) is not None)", "docstring": "Creates the Version on Google Cloud ML Engine.\n\nReturns the operation if the version was created successfully and\nraises an error otherwise.", "id": "f9151:c0:m5"} {"signature": "def _get_job(self, project_id, job_id):", "body": "job_name = ''.format(project_id, job_id)request = self._mlengine.projects().jobs().get(name=job_name)while True:try:return request.execute()except HttpError as e:if e.resp.status == :time.sleep()else:self.log.error(''.format(e))raise", "docstring": "Gets a MLEngine job based on the job name.\n\n:return: MLEngine job object if succeed.\n:rtype: dict\n\nRaises:\n googleapiclient.errors.HttpError: if HTTP error is returned from server", "id": "f9151:c0:m3"} {"signature": "def delete_many(self, mongo_collection, filter_doc, mongo_db=None, **kwargs):", "body": "collection = self.get_collection(mongo_collection, mongo_db=mongo_db)return collection.delete_many(filter_doc, **kwargs)", "docstring": "Deletes one or more documents in a mongo collection.\nhttps://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.delete_many\n\n:param mongo_collection: The name of the collection to delete from.\n:type mongo_collection: str\n:param filter_doc: A query that matches the documents to delete.\n:type filter_doc: dict\n:param mongo_db: The name of the database to use.\n Can be omitted; then the database from the connection string is used.\n:type mongo_db: str", "id": "f9152:c0:m15"} {"signature": "def replace_many(self, mongo_collection, docs,filter_docs=None, mongo_db=None, upsert=False, collation=None,**kwargs):", "body": "collection = self.get_collection(mongo_collection, mongo_db=mongo_db)if not filter_docs:filter_docs = [{'': doc['']} for doc in docs]requests = [ReplaceOne(filter_docs[i],docs[i],upsert=upsert,collation=collation)for i in range(len(docs))]return collection.bulk_write(requests, **kwargs)", "docstring": "Replaces many documents in a mongo collection.\n\nUses bulk_write with multiple ReplaceOne operations\nhttps://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.bulk_write\n\n.. 
note::\n If no ``filter_docs``are given, it is assumed that all\n replacement documents contain the ``_id`` field which are then\n used as filters.\n\n:param mongo_collection: The name of the collection to update.\n:type mongo_collection: str\n:param docs: The new documents.\n:type docs: list[dict]\n:param filter_docs: A list of queries that match the documents to replace.\n Can be omitted; then the _id fields from docs will be used.\n:type filter_docs: list[dict]\n:param mongo_db: The name of the database to use.\n Can be omitted; then the database from the connection string is used.\n:type mongo_db: str\n:param upsert: If ``True``, perform an insert if no documents\n match the filters for the replace operation.\n:type upsert: bool\n:param collation: An instance of\n :class:`~pymongo.collation.Collation`. This option is only\n supported on MongoDB 3.4 and above.\n:type collation: pymongo.collation.Collation", "id": "f9152:c0:m13"} {"signature": "def aggregate(self, mongo_collection, aggregate_query, mongo_db=None, **kwargs):", "body": "collection = self.get_collection(mongo_collection, mongo_db=mongo_db)return collection.aggregate(aggregate_query, **kwargs)", "docstring": "Runs an aggregation pipeline and returns the results\nhttps://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.aggregate\nhttps://api.mongodb.com/python/current/examples/aggregation.html", "id": "f9152:c0:m6"} {"signature": "def get_conn(self):", "body": "if self.client is not None:return self.clientoptions = self.extrasif options.get('', False):options.update({'': CERT_NONE})self.client = MongoClient(self.uri, **options)return self.client", "docstring": "Fetches PyMongo Client", "id": "f9152:c0:m3"} {"signature": "def insert_one(self, mongo_collection, doc, mongo_db=None, **kwargs):", "body": "collection = self.get_collection(mongo_collection, mongo_db=mongo_db)return collection.insert_one(doc, **kwargs)", "docstring": "Inserts a single document into a mongo collection\nhttps://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.insert_one", "id": "f9152:c0:m8"} {"signature": "@GoogleCloudBaseHook.fallback_to_default_project_iddef get_instance_group_manager(self, zone, resource_id, project_id=None):", "body": "response = self.get_conn().instanceGroupManagers().get(project=project_id,zone=zone,instanceGroupManager=resource_id).execute(num_retries=self.num_retries)return response", "docstring": "Retrieves Instance Group Manager by project_id, zone and resource_id.\nMust be called with keyword arguments rather than positional.\n\n:param zone: Google Cloud Platform zone where the Instance Group Manager exists\n:type zone: str\n:param resource_id: Name of the Instance Group Manager\n:type resource_id: str\n:param project_id: Optional, Google Cloud Platform project ID where the\n Compute Engine Instance exists. 
If set to None or missing,\n the default project_id from the GCP connection is used.\n:type project_id: str\n:return: Instance group manager representation as object according to\n https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers\n:rtype: dict", "id": "f9154:c1:m8"} {"signature": "def get_conn(self):", "body": "if not self._conn:http_authorized = self._authorize()self._conn = build('', self.api_version,http=http_authorized, cache_discovery=False)return self._conn", "docstring": "Retrieves connection to Google Compute Engine.\n\n:return: Google Compute Engine services object\n:rtype: dict", "id": "f9154:c1:m1"} {"signature": "def delete_operation(self, name):", "body": "conn = self.get_conn()resp = (conn.projects().operations().delete(name=name).execute(num_retries=self.num_retries))return resp", "docstring": "Deletes the long-running operation.\n\n.. seealso::\n https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete\n\n:param name: the name of the operation resource.\n:type name: str\n:return: none if successful.\n:rtype: dict", "id": "f9155:c0:m9"} {"signature": "def poll_operation_until_done(self, name, polling_interval_in_seconds):", "body": "while True:result = self.get_operation(name)state = result['']['']['']if state == '':self.log.info(''.format(polling_interval_in_seconds))time.sleep(polling_interval_in_seconds)else:return result", "docstring": "Poll backup operation state until it's completed.\n\n:param name: the name of the operation resource\n:type name: str\n:param polling_interval_in_seconds: The number of seconds to wait before calling another request.\n:type polling_interval_in_seconds: int\n:return: a resource operation instance.\n:rtype: dict", "id": "f9155:c0:m10"} {"signature": "def lookup(self, keys, read_consistency=None, transaction=None):", "body": "conn = self.get_conn()body = {'': keys}if read_consistency:body[''] = read_consistencyif transaction:body[''] = transactionresp = (conn.projects().lookup(projectId=self.project_id, body=body).execute(num_retries=self.num_retries))return resp", "docstring": "Lookup some entities by key.\n\n.. seealso::\n https://cloud.google.com/datastore/docs/reference/rest/v1/projects/lookup\n\n:param keys: the keys to lookup.\n:type keys: list\n:param read_consistency: the read consistency to use. default, strong or eventual.\n Cannot be used with a transaction.\n:type read_consistency: str\n:param transaction: the transaction to use, if any.\n:type transaction: str\n:return: the response body of the lookup request.\n:rtype: dict", "id": "f9155:c0:m5"} {"signature": "def commit(self, body):", "body": "conn = self.get_conn()resp = (conn.projects().commit(projectId=self.project_id, body=body).execute(num_retries=self.num_retries))return resp", "docstring": "Commit a transaction, optionally creating, deleting or modifying some entities.\n\n.. 
seealso::\n https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit\n\n:param body: the body of the commit request.\n:type body: dict\n:return: the response body of the commit request.\n:rtype: dict", "id": "f9155:c0:m4"} {"signature": "def get_conn(self):", "body": "if not self.connection:http_authorized = self._authorize()self.connection = build('', self.api_version, http=http_authorized,cache_discovery=False)return self.connection", "docstring": "Establishes a connection to the Google API.\n\n:return: a Google Cloud Datastore service object.\n:rtype: Resource", "id": "f9155:c0:m1"} {"signature": "def import_from_storage_bucket(self, bucket, file, namespace=None, entity_filter=None, labels=None):", "body": "admin_conn = self.get_conn()input_url = '' + ''.join(filter(None, [bucket, namespace, file]))if not entity_filter:entity_filter = {}if not labels:labels = {}body = {'': input_url,'': entity_filter,'': labels,}resp = (admin_conn.projects().import_(projectId=self.project_id, body=body).execute(num_retries=self.num_retries))return resp", "docstring": "Import a backup from Cloud Storage to Cloud Datastore.\n\n.. note::\n Keep in mind that this requests the Admin API not the Data API.\n\n.. seealso::\n https://cloud.google.com/datastore/docs/reference/admin/rest/v1/projects/import\n\n:param bucket: The name of the Cloud Storage bucket.\n:type bucket: str\n:param file: the metadata file written by the projects.export operation.\n:type file: str\n:param namespace: The Cloud Storage namespace path.\n:type namespace: str\n:param entity_filter: specify which kinds/namespaces are to be imported.\n:type entity_filter: dict\n:param labels: Client-assigned labels.\n:type labels: dict of str\n:return: a resource operation instance.\n:rtype: dict", "id": "f9155:c0:m12"} {"signature": "def begin_transaction(self):", "body": "conn = self.get_conn()resp = (conn.projects().beginTransaction(projectId=self.project_id, body={}).execute(num_retries=self.num_retries))return resp['']", "docstring": "Begins a new transaction.\n\n.. seealso::\n https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction\n\n:return: a transaction handle.\n:rtype: str", "id": "f9155:c0:m3"} {"signature": "def get_conn(self):", "body": "conn = self.get_connection(self.vertica_conn_id)conn_config = {\"\": conn.login,\"\": conn.password or '',\"\": conn.schema,\"\": conn.host or ''}if not conn.port:conn_config[\"\"] = else:conn_config[\"\"] = int(conn.port)conn = connect(**conn_config)return conn", "docstring": "Returns verticaql connection object", "id": "f9156:c0:m0"} {"signature": "def close_conn(self):", "body": "conn = self.connconn.quit()self.conn = None", "docstring": "Closes the connection. 
An error will occur if the\nconnection wasn't ever opened.", "id": "f9158:c0:m4"} {"signature": "def delete_directory(self, path):", "body": "conn = self.get_conn()conn.rmd(path)", "docstring": "Deletes a directory on the remote system.\n\n:param path: full path to the remote directory to delete\n:type path: str", "id": "f9158:c0:m8"} {"signature": "def get_size(self, path):", "body": "conn = self.get_conn()return conn.size(path)", "docstring": "Returns the size of a file (in bytes)\n\n:param path: remote file path\n:type path: string", "id": "f9158:c0:m14"} {"signature": "def delete_file(self, path):", "body": "conn = self.get_conn()conn.delete(path)", "docstring": "Removes a file on the FTP Server.\n\n:param path: full path to the remote file\n:type path: str", "id": "f9158:c0:m11"} {"signature": "def describe_directory(self, path):", "body": "conn = self.get_conn()conn.cwd(path)try:files = dict(conn.mlsd())except AttributeError:files = dict(mlsd(conn))return files", "docstring": "Returns a dictionary of {filename: {attributes}} for all files\non the remote system (where the MLSD command is supported).\n\n:param path: full path to the remote directory\n:type path: str", "id": "f9158:c0:m5"} {"signature": "def get_session(self, region_name=None):", "body": "session, _ = self._get_credentials(region_name)return session", "docstring": "Get the underlying boto3.session.", "id": "f9159:c0:m4"} {"signature": "def write_batch_data(self, items):", "body": "dynamodb_conn = self.get_conn()try:table = dynamodb_conn.Table(self.table_name)with table.batch_writer(overwrite_by_pkeys=self.table_keys) as batch:for item in items:batch.put_item(Item=item)return Trueexcept Exception as general_error:raise AirflowException(''.format(error=str(general_error)))", "docstring": "Write batch items to dynamodb table with provisioned throughout capacity.", "id": "f9160:c0:m2"} {"signature": "def get_attachments_by_name(self, name, check_regex, find_first=False):", "body": "attachments = []for part in self.mail.walk():mail_part = MailPart(part)if mail_part.is_attachment():found_attachment = mail_part.has_matching_name(name) if check_regexelse mail_part.has_equal_name(name)if found_attachment:file_name, file_payload = mail_part.get_file()self.log.info(''.format(file_name))attachments.append((file_name, file_payload))if find_first:breakreturn attachments", "docstring": "Gets all attachments by name for the mail.\n\n:param name: The name of the attachment to look for.\n:type name: str\n:param check_regex: Checks the name for a regular expression.\n:type check_regex: bool\n:param find_first: If set to True it will only find the first match and then quit.\n:type find_first: bool\n:returns: a list of tuples each containing name and payload\n where the attachments name matches the given name.\n:rtype: list of tuple", "id": "f9163:c1:m2"} {"signature": "def get_partitions(self,database_name,table_name,expression='',page_size=None,max_items=None):", "body": "config = {'': page_size,'': max_items,}paginator = self.get_conn().get_paginator('')response = paginator.paginate(DatabaseName=database_name,TableName=table_name,Expression=expression,PaginationConfig=config)partitions = set()for page in response:for p in page['']:partitions.add(tuple(p['']))return partitions", "docstring": "Retrieves the partition values for a table.\n\n:param database_name: The name of the catalog database where the partitions reside.\n:type database_name: str\n:param table_name: The name of the partitions' table.\n:type table_name: str\n:param 
expression: An expression filtering the partitions to be returned.\n Please see official AWS documentation for further information.\n https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-partitions.html#aws-glue-api-catalog-partitions-GetPartitions\n:type expression: str\n:param page_size: pagination size\n:type page_size: int\n:param max_items: maximum items to return\n:type max_items: int\n:return: set of partition values where each value is a tuple since\n a partition may be composed of multiple columns. For example:\n ``{('2018-01-01','1'), ('2018-01-01','2')}``", "id": "f9164:c0:m2"} {"signature": "def get_table_location(self, database_name, table_name):", "body": "table = self.get_table(database_name, table_name)return table['']['']", "docstring": "Get the physical location of the table\n\n:param database_name: Name of hive database (schema) @table belongs to\n:type database_name: str\n:param table_name: Name of hive table\n:type table_name: str\n:return: str", "id": "f9164:c0:m5"} {"signature": "def describe_object(self, obj):", "body": "conn = self.get_conn()return conn.__getattr__(obj).describe()", "docstring": "Get the description of an object from Salesforce.\nThis description is the object's schema and\nsome extra metadata that Salesforce stores for each object.\n\n:param obj: The name of the Salesforce object that we are getting a description of.\n:type obj: str\n:return: the description of the Salesforce object.\n:rtype: dict", "id": "f9165:c0:m3"} {"signature": "def __init__(self, conn_id):", "body": "super().__init__(conn_id)self.conn_id = conn_idself.conn = None", "docstring": "Create new connection to Salesforce and allows you to pull data out of SFDC and save it to a file.\n\nYou can then use that file with other Airflow operators to move the data into another data source.\n\n:param conn_id: the name of the connection that has the parameters we need to connect to Salesforce.\n The connection should be type `http` and include a user's security token in the `Extras` field.\n:type conn_id: str\n\n.. 
note::\n For the HTTP connection type, you can include a\n JSON structure in the `Extras` field.\n We need a user's security token to connect to Salesforce.\n So we define it in the `Extras` field as:\n `{\"security_token\":\"YOUR_SECURITY_TOKEN\"}`", "id": "f9165:c0:m0"} {"signature": "def write_object_to_file(self,query_results,filename,fmt=\"\",coerce_to_timestamp=False,record_time_added=False):", "body": "fmt = fmt.lower()if fmt not in ['', '', '']:raise ValueError(\"\".format(fmt))df = pd.DataFrame.from_records(query_results, exclude=[\"\"])df.columns = [column.lower() for column in df.columns]if coerce_to_timestamp and df.shape[] > :object_name = query_results[]['']['']self.log.info(\"\", object_name)schema = self.describe_object(object_name)possible_timestamp_cols = [field[''].lower()for field in schema['']if field[''] in [\"\", \"\"] and field[''].lower() in df.columns]df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp)if record_time_added:fetched_time = time.time()df[\"\"] = fetched_timeif fmt == \"\":self.log.info(\"\")possible_strings = df.columns[df.dtypes == \"\"]df[possible_strings] = df[possible_strings].apply(lambda x: x.str.replace(\"\", \"\").str.replace(\"\", \"\"))df.to_csv(filename, index=False)elif fmt == \"\":df.to_json(filename, \"\", date_unit=\"\")elif fmt == \"\":df.to_json(filename, \"\", lines=True, date_unit=\"\")return df", "docstring": "Write query results to file.\n\nAcceptable formats are:\n - csv:\n comma-separated-values file. This is the default format.\n - json:\n JSON array. Each element in the array is a different row.\n - ndjson:\n JSON array but each element is new-line delimited instead of comma delimited like in `json`\n\nThis requires a significant amount of cleanup.\nPandas doesn't handle output to CSV and json in a uniform way.\nThis is especially painful for datetime types.\nPandas wants to write them as strings in CSV, but as millisecond Unix timestamps.\n\nBy default, this function will try and leave all values as they are represented in Salesforce.\nYou use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).\nThis is can be greatly beneficial as it will make all of your datetime fields look the same,\nand makes it easier to work with in other database environments\n\n:param query_results: the results from a SQL query\n:type query_results: list of dict\n:param filename: the name of the file where the data should be dumped to\n:type filename: str\n:param fmt: the format you want the output in. Default: 'csv'\n:type fmt: str\n:param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.\n False if you want them to be left in the same format as they were in Salesforce.\n Leaving the value as False will result in datetimes being strings. Default: False\n:type coerce_to_timestamp: bool\n:param record_time_added: True if you want to add a Unix timestamp field\n to the resulting data that marks when the data was fetched from Salesforce. 
Default: False\n:type record_time_added: bool\n:return: the dataframe that gets written to the file.\n:rtype: pd.Dataframe", "id": "f9165:c0:m7"} {"signature": "@classmethoddef _to_timestamp(cls, column):", "body": "try:column = pd.to_datetime(column)except ValueError:log = LoggingMixin().loglog.warning(\"\", column.name)return columnconverted = []for value in column:try:converted.append(value.timestamp())except (ValueError, AttributeError):converted.append(pd.np.NaN)return pd.Series(converted, index=column.index)", "docstring": "Convert a column of a dataframe to UNIX timestamps if applicable\n\n:param column: A Series object representing a column of a dataframe.\n:type column: pd.Series\n:return: a new series that maintains the same index as the original\n:rtype: pd.Series", "id": "f9165:c0:m6"} {"signature": "def get_available_fields(self, obj):", "body": "self.get_conn()obj_description = self.describe_object(obj)return [field[''] for field in obj_description['']]", "docstring": "Get a list of all available fields for an object.\n\n:param obj: The name of the Salesforce object that we are getting a description of.\n:type obj: str\n:return: the names of the fields.\n:rtype: list of str", "id": "f9165:c0:m4"} {"signature": "def delete_file(self, path):", "body": "conn = self.get_conn()conn.remove(path)", "docstring": "Removes a file on the FTP Server\n:param path: full path to the remote file\n:type path: str", "id": "f9168:c0:m9"} {"signature": "def fetchmany(self, size=None):", "body": "if size is None:size = self.arraysizeresult = []for _ in range(size):one = self.fetchone()if one is None:breakelse:result.append(one)return result", "docstring": "Fetch the next set of rows of a query result, returning a sequence of sequences\n(e.g. a list of tuples). An empty sequence is returned when no more rows are\navailable. The number of rows to fetch per call is specified by the parameter.\nIf it is not given, the cursor's arraysize determines the number of rows to be\nfetched. The method should try to fetch as many rows as indicated by the size\nparameter. If this is not possible due to the specified number of rows not being\navailable, fewer rows may be returned. An :py:class:`~pyhive.exc.Error`\n(or subclass) exception is raised if the previous call to\n:py:meth:`execute` did not produce any result set or no call was issued yet.", "id": "f9169:c4:m8"} {"signature": "def run_grant_dataset_view_access(self,source_dataset,view_dataset,view_table,source_project=None,view_project=None):", "body": "source_project = source_project if source_project else self.project_idview_project = view_project if view_project else self.project_idsource_dataset_resource = self.service.datasets().get(projectId=source_project, datasetId=source_dataset).execute(num_retries=self.num_retries)access = source_dataset_resource[''] if '' in source_dataset_resource else []view_access = {'': {'': view_project,'': view_dataset,'': view_table}}if view_access not in access:self.log.info('',view_project, view_dataset, view_table, source_project,source_dataset)access.append(view_access)return self.service.datasets().patch(projectId=source_project,datasetId=source_dataset,body={'': access}).execute(num_retries=self.num_retries)else:self.log.info('',view_project, view_dataset, view_table, source_project, source_dataset)return source_dataset_resource", "docstring": "Grant authorized view access of a dataset to a view table.\nIf this view has already been granted access to the dataset, do nothing.\nThis method is not atomic. 
Running it may clobber a simultaneous update.\n\n:param source_dataset: the source dataset\n:type source_dataset: str\n:param view_dataset: the dataset that the view is in\n:type view_dataset: str\n:param view_table: the table of the view\n:type view_table: str\n:param source_project: the project of the source dataset. If None,\n self.project_id will be used.\n:type source_project: str\n:param view_project: the project that the view is in. If None,\n self.project_id will be used.\n:type view_project: str\n:return: the datasets resource of the source dataset.", "id": "f9169:c3:m15"} {"signature": "def get_tabledata(self, dataset_id, table_id,max_results=None, selected_fields=None, page_token=None,start_index=None):", "body": "optional_params = {}if max_results:optional_params[''] = max_resultsif selected_fields:optional_params[''] = selected_fieldsif page_token:optional_params[''] = page_tokenif start_index:optional_params[''] = start_indexreturn (self.service.tabledata().list(projectId=self.project_id,datasetId=dataset_id,tableId=table_id,**optional_params).execute(num_retries=self.num_retries))", "docstring": "Get the data of a given dataset.table and optionally with selected columns.\nsee https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list\n\n:param dataset_id: the dataset ID of the requested table.\n:param table_id: the table ID of the requested table.\n:param max_results: the maximum results to return.\n:param selected_fields: List of fields to return (comma-separated). If\n unspecified, all fields are returned.\n:param page_token: page token, returned from a previous call,\n identifying the result set.\n:param start_index: zero based index of the starting row to read.\n:return: map containing the requested rows.", "id": "f9169:c3:m12"} {"signature": "def close(self):", "body": "pass", "docstring": "By default, do nothing", "id": "f9169:c4:m2"} {"signature": "def run_query(self,sql,destination_dataset_table=None,write_disposition='',allow_large_results=False,flatten_results=None,udf_config=None,use_legacy_sql=None,maximum_billing_tier=None,maximum_bytes_billed=None,create_disposition='',query_params=None,labels=None,schema_update_options=(),priority='',time_partitioning=None,api_resource_configs=None,cluster_fields=None,location=None):", "body": "if time_partitioning is None:time_partitioning = {}if location:self.location = locationif not api_resource_configs:api_resource_configs = self.api_resource_configselse:_validate_value('',api_resource_configs, dict)configuration = deepcopy(api_resource_configs)if '' not in configuration:configuration[''] = {}else:_validate_value(\"\",configuration[''], dict)if sql is None and not configuration[''].get('', None):raise TypeError('''')allowed_schema_update_options = ['', \"\"]if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):raise ValueError(\"\"\"\"\"\".format(schema_update_options,allowed_schema_update_options))if schema_update_options:if write_disposition not in [\"\", \"\"]:raise ValueError(\"\"\"\"\"\")if destination_dataset_table:destination_project, destination_dataset, destination_table =_split_tablename(table_input=destination_dataset_table,default_project_id=self.project_id)destination_dataset_table = {'': destination_project,'': destination_dataset,'': destination_table,}if cluster_fields:cluster_fields = {'': cluster_fields}query_param_list = [(sql, '', None, six.string_types),(priority, '', '', six.string_types),(use_legacy_sql, '', self.use_legacy_sql, bool),(query_params, '', None, 
list),(udf_config, '', None, list),(maximum_billing_tier, '', None, int),(maximum_bytes_billed, '', None, float),(time_partitioning, '', {}, dict),(schema_update_options, '', None, tuple),(destination_dataset_table, '', None, dict),(cluster_fields, '', None, dict),]for param_tuple in query_param_list:param, param_name, param_default, param_type = param_tupleif param_name not in configuration[''] and param in [None, {}, ()]:if param_name == '':param_default = _cleanse_time_partitioning(destination_dataset_table, time_partitioning)param = param_defaultif param not in [None, {}, ()]:_api_resource_configs_duplication_check(param_name, param, configuration[''])configuration[''][param_name] = param_validate_value(param_name, configuration[''][param_name],param_type)if param_name == '' and param:self.log.info(\"\"\"\", schema_update_options)if param_name == '':for key in ['', '', '']:if key not in configuration['']['']:raise ValueError(\"\"\"\"\"\"\"\")configuration[''].update({'': allow_large_results,'': flatten_results,'': write_disposition,'': create_disposition,})if '' in configuration[''] and configuration[''][''] and'' in configuration['']:raise ValueError(\"\"\"\")if labels:_api_resource_configs_duplication_check('', labels, configuration)configuration[''] = labelsreturn self.run_with_configuration(configuration)", "docstring": "Executes a BigQuery SQL query. Optionally persists results in a BigQuery\ntable. See here:\n\nhttps://cloud.google.com/bigquery/docs/reference/v2/jobs\n\nFor more details about these parameters.\n\n:param sql: The BigQuery SQL to execute.\n:type sql: str\n:param destination_dataset_table: The dotted ``.
``\n BigQuery table to save the query results.\n:type destination_dataset_table: str\n:param write_disposition: What to do if the table already exists in\n BigQuery.\n:type write_disposition: str\n:param allow_large_results: Whether to allow large results.\n:type allow_large_results: bool\n:param flatten_results: If true and query uses legacy SQL dialect, flattens\n all nested and repeated fields in the query results. ``allowLargeResults``\n must be true if this is set to false. For standard SQL queries, this\n flag is ignored and results are never flattened.\n:type flatten_results: bool\n:param udf_config: The User Defined Function configuration for the query.\n See https://cloud.google.com/bigquery/user-defined-functions for details.\n:type udf_config: list\n:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).\n If `None`, defaults to `self.use_legacy_sql`.\n:type use_legacy_sql: bool\n:param api_resource_configs: a dictionary that contain params\n 'configuration' applied for Google BigQuery Jobs API:\n https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs\n for example, {'query': {'useQueryCache': False}}. You could use it\n if you need to provide some params that are not supported by the\n BigQueryHook like args.\n:type api_resource_configs: dict\n:param maximum_billing_tier: Positive integer that serves as a\n multiplier of the basic price.\n:type maximum_billing_tier: int\n:param maximum_bytes_billed: Limits the bytes billed for this job.\n Queries that will have bytes billed beyond this limit will fail\n (without incurring a charge). If unspecified, this will be\n set to your project default.\n:type maximum_bytes_billed: float\n:param create_disposition: Specifies whether the job is allowed to\n create new tables.\n:type create_disposition: str\n:param query_params: a list of dictionary containing query parameter types and\n values, passed to BigQuery\n:type query_params: list\n:param labels: a dictionary containing labels for the job/query,\n passed to BigQuery\n:type labels: dict\n:param schema_update_options: Allows the schema of the destination\n table to be updated as a side effect of the query job.\n:type schema_update_options: tuple\n:param priority: Specifies a priority for the query.\n Possible values include INTERACTIVE and BATCH.\n The default value is INTERACTIVE.\n:type priority: str\n:param time_partitioning: configure optional time partitioning fields i.e.\n partition by field, type and expiration as per API specifications.\n:type time_partitioning: dict\n:param cluster_fields: Request that the result of this query be stored sorted\n by one or more columns. This is only available in combination with\n time_partitioning. The order of columns given determines the sort order.\n:type cluster_fields: list[str]\n:param location: The geographic location of the job. Required except for\n US and EU. See details at\n https://cloud.google.com/bigquery/docs/locations#specifying_your_location\n:type location: str", "id": "f9169:c3:m4"} {"signature": "def get_pandas_df(self, sql, parameters=None, dialect=None):", "body": "private_key = self._get_field('', None) or self._get_field('', None)if dialect is None:dialect = '' if self.use_legacy_sql else ''return read_gbq(sql,project_id=self._get_field(''),dialect=dialect,verbose=False,private_key=private_key)", "docstring": "Returns a Pandas DataFrame for the results produced by a BigQuery\nquery. 
The DbApiHook method must be overridden because Pandas\ndoesn't support PEP 249 connections, except for SQLite. See:\n\nhttps://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447\nhttps://github.com/pydata/pandas/issues/6900\n\n:param sql: The BigQuery SQL to execute.\n:type sql: str\n:param parameters: The parameters to render the SQL query with (not\n used, leave to override superclass method)\n:type parameters: mapping or iterable\n:param dialect: Dialect of BigQuery SQL \u2013 legacy SQL or standard SQL\n defaults to use `self.use_legacy_sql` if not specified\n:type dialect: str in {'legacy', 'standard'}", "id": "f9169:c0:m4"} {"signature": "def run_load(self,destination_project_dataset_table,source_uris,schema_fields=None,source_format='',create_disposition='',skip_leading_rows=,write_disposition='',field_delimiter='',max_bad_records=,quote_character=None,ignore_unknown_values=False,allow_quoted_newlines=False,allow_jagged_rows=False,schema_update_options=(),src_fmt_configs=None,time_partitioning=None,cluster_fields=None,autodetect=False):", "body": "if schema_fields is None and not autodetect:raise ValueError('')if src_fmt_configs is None:src_fmt_configs = {}source_format = source_format.upper()allowed_formats = [\"\", \"\", \"\", \"\",\"\", \"\"]if source_format not in allowed_formats:raise ValueError(\"\"\"\".format(source_format, allowed_formats))allowed_schema_update_options = ['', \"\"]if not set(allowed_schema_update_options).issuperset(set(schema_update_options)):raise ValueError(\"\"\"\".format(schema_update_options, allowed_schema_update_options))destination_project, destination_dataset, destination_table =_split_tablename(table_input=destination_project_dataset_table,default_project_id=self.project_id,var_name='')configuration = {'': {'': autodetect,'': create_disposition,'': {'': destination_project,'': destination_dataset,'': destination_table,},'': source_format,'': source_uris,'': write_disposition,'': ignore_unknown_values}}time_partitioning = _cleanse_time_partitioning(destination_project_dataset_table,time_partitioning)if time_partitioning:configuration[''].update({'': time_partitioning})if cluster_fields:configuration[''].update({'': {'': cluster_fields}})if schema_fields:configuration[''][''] = {'': schema_fields}if schema_update_options:if write_disposition not in [\"\", \"\"]:raise ValueError(\"\"\"\"\"\")else:self.log.info(\"\",schema_update_options)configuration[''][''] = schema_update_optionsif max_bad_records:configuration[''][''] = max_bad_recordsif '' not in src_fmt_configs:src_fmt_configs[''] = skip_leading_rowsif '' not in src_fmt_configs:src_fmt_configs[''] = field_delimiterif '' not in src_fmt_configs:src_fmt_configs[''] = ignore_unknown_valuesif quote_character is not None:src_fmt_configs[''] = quote_characterif allow_quoted_newlines:src_fmt_configs[''] = allow_quoted_newlinessrc_fmt_to_configs_mapping = {'': ['', '', '','', '', '','', ''],'': [''],'': ['', ''],'': ['', ''],'': [''],}valid_configs = src_fmt_to_configs_mapping[source_format]src_fmt_configs = {k: vfor k, v in src_fmt_configs.items() if k in valid_configs}configuration[''].update(src_fmt_configs)if allow_jagged_rows:configuration[''][''] = allow_jagged_rowsreturn self.run_with_configuration(configuration)", "docstring": "Executes a BigQuery load command to load data from Google Cloud Storage\nto BigQuery. 
See here:\n\nhttps://cloud.google.com/bigquery/docs/reference/v2/jobs\n\nFor more details about these parameters.\n\n:param destination_project_dataset_table:\n The dotted ``(.|:).
($)`` BigQuery\n table to load data into. If ```` is not included, project will be the\n project defined in the connection json. If a partition is specified the\n operator will automatically append the data, create a new partition or create\n a new DAY partitioned table.\n:type destination_project_dataset_table: str\n:param schema_fields: The schema field list as defined here:\n https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load\n Required if autodetect=False; optional if autodetect=True.\n:type schema_fields: list\n:param autodetect: Attempt to autodetect the schema for CSV and JSON\n source files.\n:type autodetect: bool\n:param source_uris: The source Google Cloud\n Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild\n per-object name can be used.\n:type source_uris: list\n:param source_format: File format to export.\n:type source_format: str\n:param create_disposition: The create disposition if the table doesn't exist.\n:type create_disposition: str\n:param skip_leading_rows: Number of rows to skip when loading from a CSV.\n:type skip_leading_rows: int\n:param write_disposition: The write disposition if the table already exists.\n:type write_disposition: str\n:param field_delimiter: The delimiter to use when loading from a CSV.\n:type field_delimiter: str\n:param max_bad_records: The maximum number of bad records that BigQuery can\n ignore when running the job.\n:type max_bad_records: int\n:param quote_character: The value that is used to quote data sections in a CSV\n file.\n:type quote_character: str\n:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow\n extra values that are not represented in the table schema.\n If true, the extra values are ignored. If false, records with extra columns\n are treated as bad records, and if there are too many bad records, an\n invalid error is returned in the job result.\n:type ignore_unknown_values: bool\n:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not\n (false).\n:type allow_quoted_newlines: bool\n:param allow_jagged_rows: Accept rows that are missing trailing optional columns.\n The missing values are treated as nulls. If false, records with missing\n trailing columns are treated as bad records, and if there are too many bad\n records, an invalid error is returned in the job result. Only applicable when\n soure_format is CSV.\n:type allow_jagged_rows: bool\n:param schema_update_options: Allows the schema of the destination\n table to be updated as a side effect of the load job.\n:type schema_update_options: tuple\n:param src_fmt_configs: configure optional fields specific to the source format\n:type src_fmt_configs: dict\n:param time_partitioning: configure optional time partitioning fields i.e.\n partition by field, type and expiration as per API specifications.\n:type time_partitioning: dict\n:param cluster_fields: Request that the result of this load be stored sorted\n by one or more columns. This is only available in combination with\n time_partitioning. 
The order of columns given determines the sort order.\n:type cluster_fields: list[str]", "id": "f9169:c3:m7"} {"signature": "def run_with_configuration(self, configuration):", "body": "jobs = self.service.jobs()job_data = {'': configuration}query_reply = jobs.insert(projectId=self.project_id, body=job_data).execute(num_retries=self.num_retries)self.running_job_id = query_reply['']['']if '' in query_reply['']:location = query_reply['']['']else:location = self.locationkeep_polling_job = Truewhile keep_polling_job:try:if location:job = jobs.get(projectId=self.project_id,jobId=self.running_job_id,location=location).execute(num_retries=self.num_retries)else:job = jobs.get(projectId=self.project_id,jobId=self.running_job_id).execute(num_retries=self.num_retries)if job[''][''] == '':keep_polling_job = Falseif '' in job['']:raise Exception(''.format(job[''][''], job))else:self.log.info('',self.project_id, self.running_job_id)time.sleep()except HttpError as err:if err.resp.status in [, ]:self.log.info('',err.resp.status, self.running_job_id)time.sleep()else:raise Exception(''.format(err.resp.status))return self.running_job_id", "docstring": "Executes a BigQuery SQL query. See here:\n\nhttps://cloud.google.com/bigquery/docs/reference/v2/jobs\n\nFor more details about the configuration parameter.\n\n:param configuration: The configuration parameter maps directly to\n BigQuery's configuration field in the job object. See\n https://cloud.google.com/bigquery/docs/reference/v2/jobs for\n details.", "id": "f9169:c3:m8"} {"signature": "def get_datasets_list(self, project_id=None):", "body": "dataset_project_id = project_id if project_id else self.project_idtry:datasets_list = self.service.datasets().list(projectId=dataset_project_id).execute(num_retries=self.num_retries)['']self.log.info(\"\", datasets_list)except HttpError as err:raise AirflowException(''.format(err.content))return datasets_list", "docstring": "Method returns full list of BigQuery datasets in the current project\n\n.. seealso::\n For more information, see:\n https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list\n\n:param project_id: Google Cloud Project for which you\n try to get all datasets\n:type project_id: str\n:return: datasets_list\n\n Example of returned datasets_list: ::\n\n {\n \"kind\":\"bigquery#dataset\",\n \"location\":\"US\",\n \"id\":\"your-project:dataset_2_test\",\n \"datasetReference\":{\n \"projectId\":\"your-project\",\n \"datasetId\":\"dataset_2_test\"\n }\n },\n {\n \"kind\":\"bigquery#dataset\",\n \"location\":\"US\",\n \"id\":\"your-project:dataset_1_test\",\n \"datasetReference\":{\n \"projectId\":\"your-project\",\n \"datasetId\":\"dataset_1_test\"\n }\n }\n ]", "id": "f9169:c3:m19"} {"signature": "def execute(self, operation, parameters=None):", "body": "sql = _bind_parameters(operation,parameters) if parameters else operationself.job_id = self.run_query(sql)", "docstring": "Executes a BigQuery query, and returns the job ID.\n\n:param operation: The query to execute.\n:type operation: str\n:param parameters: Parameters to substitute into the query.\n:type parameters: dict", "id": "f9169:c4:m4"} {"signature": "def commit(self):", "body": "pass", "docstring": "BigQueryConnection does not support transactions.", "id": "f9169:c2:m2"} {"signature": "def fetchall(self):", "body": "result = []while True:one = self.fetchone()if one is None:breakelse:result.append(one)return result", "docstring": "Fetch all (remaining) rows of a query result, returning them as a sequence of\nsequences (e.g. 
a list of tuples).", "id": "f9169:c4:m9"} {"signature": "def setinputsizes(self, sizes):", "body": "pass", "docstring": "Does nothing by default", "id": "f9169:c4:m12"} {"signature": "@propertydef description(self):", "body": "raise NotImplementedError", "docstring": "The schema description method is not currently implemented.", "id": "f9169:c4:m1"} {"signature": "def _validate_value(key, value, expected_type):", "body": "if not isinstance(value, expected_type):raise TypeError(\"\".format(key, expected_type, type(value)))", "docstring": "function to check expected type and raise\n error if type is not correct", "id": "f9169:m5"} {"signature": "def close(self):", "body": "pass", "docstring": "BigQueryConnection does not have anything to close.", "id": "f9169:c2:m1"} {"signature": "def cursor(self):", "body": "return BigQueryCursor(*self._args, **self._kwargs)", "docstring": "Return a new :py:class:`Cursor` object using the connection.", "id": "f9169:c2:m3"} {"signature": "def _get_webhook_endpoint(self, http_conn_id, webhook_endpoint):", "body": "if webhook_endpoint:endpoint = webhook_endpointelif http_conn_id:conn = self.get_connection(http_conn_id)extra = conn.extra_dejsonendpoint = extra.get('', '')else:raise AirflowException('''')if not re.match('', endpoint):raise AirflowException('''')return endpoint", "docstring": "Given a Discord http_conn_id, return the default webhook endpoint or override if a\nwebhook_endpoint is manually supplied.\n\n:param http_conn_id: The provided connection ID\n:param webhook_endpoint: The manually provided webhook endpoint\n:return: Webhook endpoint (str) to use", "id": "f9171:c0:m1"} {"signature": "def _do_api_call(self, endpoint_info, json):", "body": "method, endpoint = endpoint_infourl = ''.format(host=self._parse_host(self.databricks_conn.host),endpoint=endpoint)if '' in self.databricks_conn.extra_dejson:self.log.info('')auth = _TokenAuth(self.databricks_conn.extra_dejson[''])else:self.log.info('')auth = (self.databricks_conn.login, self.databricks_conn.password)if method == '':request_func = requests.getelif method == '':request_func = requests.postelse:raise AirflowException('' + method)attempt_num = while True:try:response = request_func(url,json=json,auth=auth,headers=USER_AGENT_HEADER,timeout=self.timeout_seconds)response.raise_for_status()return response.json()except requests_exceptions.RequestException as e:if not _retryable_error(e):raise AirflowException(''.format(e.response.content, e.response.status_code))self._log_request_error(attempt_num, e)if attempt_num == self.retry_limit:raise AirflowException(('' +'').format(self.retry_limit))attempt_num += sleep(self.retry_delay)", "docstring": "Utility function to perform an API call with retries\n\n:param endpoint_info: Tuple of method and endpoint\n:type endpoint_info: tuple[string, string]\n:param json: Parameters for this API call.\n:type json: dict\n:return: If the api call returns a OK status code,\n this function returns the response in JSON. 
Otherwise,\n we throw an AirflowException.\n:rtype: dict", "id": "f9172:c0:m2"} {"signature": "def __init__(self,databricks_conn_id='',timeout_seconds=,retry_limit=,retry_delay=):", "body": "self.databricks_conn_id = databricks_conn_idself.databricks_conn = self.get_connection(databricks_conn_id)self.timeout_seconds = timeout_secondsif retry_limit < :raise ValueError('')self.retry_limit = retry_limitself.retry_delay = retry_delay", "docstring": ":param databricks_conn_id: The name of the databricks connection to use.\n:type databricks_conn_id: str\n:param timeout_seconds: The amount of time in seconds the requests library\n will wait before timing-out.\n:type timeout_seconds: int\n:param retry_limit: The number of times to retry the connection in case of\n service outages.\n:type retry_limit: int\n:param retry_delay: The number of seconds to wait between retries (it\n might be a floating point number).\n:type retry_delay: float", "id": "f9172:c0:m0"} {"signature": "def send_message(self, queue_url, message_body, delay_seconds=, message_attributes=None):", "body": "return self.get_conn().send_message(QueueUrl=queue_url,MessageBody=message_body,DelaySeconds=delay_seconds,MessageAttributes=message_attributes or {})", "docstring": "Send message to the queue\n\n:param queue_url: queue url\n:type queue_url: str\n:param message_body: the contents of the message\n:type message_body: str\n:param delay_seconds: seconds to delay the message\n:type delay_seconds: int\n:param message_attributes: additional attributes for the message (default: None)\n For details of the attributes parameter see :py:meth:`botocore.client.SQS.send_message`\n:type message_attributes: dict\n\n:return: dict with the information about the message sent\n For details of the returned value see :py:meth:`botocore.client.SQS.send_message`\n:rtype: dict", "id": "f9176:c0:m2"} {"signature": "@GoogleCloudBaseHook.fallback_to_default_project_iddef create_instance(self, instance_id, configuration_name, node_count,display_name, project_id=None):", "body": "self._apply_to_instance(project_id, instance_id, configuration_name,node_count, display_name, lambda x: x.create())", "docstring": "Creates a new Cloud Spanner instance.\n\n:param instance_id: The ID of the Cloud Spanner instance.\n:type instance_id: str\n:param configuration_name: The name of the instance configuration defining how the\n instance will be created. Possible configuration values can be retrieved via\n https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list\n:type configuration_name: str\n:param node_count: (Optional) The number of nodes allocated to the Cloud Spanner\n instance.\n:type node_count: int\n:param display_name: (Optional) The display name for the instance in the GCP\n Console. Must be between 4 and 30 characters. If this value is not set in\n the constructor, the name falls back to the instance ID.\n:type display_name: str\n:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner\n database. 
If set to None or missing, the default project_id from the GCP connection is used.\n:type project_id: str\n:return: None", "id": "f9178:c0:m4"} {"signature": "@GoogleCloudBaseHook.fallback_to_default_project_iddef execute_dml(self, instance_id, database_id, queries, project_id=None):", "body": "self._get_client(project_id=project_id).instance(instance_id=instance_id).database(database_id=database_id).run_in_transaction(lambda transaction: self._execute_sql_in_transaction(transaction, queries))", "docstring": "Executes an arbitrary DML query (INSERT, UPDATE, DELETE).\n\n:param instance_id: The ID of the Cloud Spanner instance.\n:type instance_id: str\n:param database_id: The ID of the database in Cloud Spanner.\n:type database_id: str\n:param queries: The queries to execute.\n:type queries: str\n:param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner\n database. If set to None or missing, the default project_id from the GCP connection is used.\n:type project_id: str", "id": "f9178:c0:m11"} {"signature": "def _apply_to_instance(self, project_id, instance_id, configuration_name, node_count,display_name, func):", "body": "instance = self._get_client(project_id=project_id).instance(instance_id=instance_id, configuration_name=configuration_name,node_count=node_count, display_name=display_name)try:operation = func(instance) except GoogleAPICallError as e:self.log.error('', e.message)raise eif operation:result = operation.result()self.log.info(result)", "docstring": "Invokes a method on a given instance by applying a specified Callable.\n\n:param project_id: The ID of the GCP project that owns the Cloud Spanner\n database.\n:type project_id: str\n:param instance_id: The ID of the instance.\n:type instance_id: str\n:param configuration_name: Name of the instance configuration defining how the\n instance will be created. Required for instances which do not yet exist.\n:type configuration_name: str\n:param node_count: (Optional) Number of nodes allocated to the instance.\n:type node_count: int\n:param display_name: (Optional) The display name for the instance in the Cloud\n Console UI. (Must be between 4 and 30 characters.) 
If this value is not set\n in the constructor, will fall back to the instance ID.\n:type display_name: str\n:param func: Method of the instance to be called.\n:type func: Callable", "id": "f9178:c0:m3"} {"signature": "def _get_client(self, project_id):", "body": "if not self._client:self._client = Client(project=project_id, credentials=self._get_credentials())return self._client", "docstring": "Provides a client for interacting with the Cloud Spanner API.\n\n:param project_id: The ID of the GCP project.\n:type project_id: str\n:return: google.cloud.spanner_v1.client.Client\n:rtype: object", "id": "f9178:c0:m1"} {"signature": "def post_event(self, title, text, aggregation_key=None, alert_type=None, date_happened=None,handle=None, priority=None, related_event_id=None, tags=None, device_name=None):", "body": "response = api.Event.create(title=title,text=text,aggregation_key=aggregation_key,alert_type=alert_type,date_happened=date_happened,handle=handle,priority=priority,related_event_id=related_event_id,tags=tags,host=self.host,device_name=device_name,source_type_name=self.source_type_name)self.validate_response(response)return response", "docstring": "Posts an event to datadog (processing finished, potentially alerts, other issues)\nThink about this as a means to maintain persistence of alerts, rather than\nalerting itself.\n\n:param title: The title of the event\n:type title: str\n:param text: The body of the event (more information)\n:type text: str\n:param aggregation_key: Key that can be used to aggregate this event in a stream\n:type aggregation_key: str\n:param alert_type: The alert type for the event, one of\n [\"error\", \"warning\", \"info\", \"success\"]\n:type alert_type: str\n:param date_happened: POSIX timestamp of the event; defaults to now\n:type date_happened: int\n:handle: User to post the event as; defaults to owner of the application key used\n to submit.\n:param handle: str\n:param priority: Priority to post the event as. 
(\"normal\" or \"low\", defaults to \"normal\")\n:type priority: str\n:param related_event_id: Post event as a child of the given event\n:type related_event_id: id\n:param tags: List of tags to apply to the event\n:type tags: list[str]\n:param device_name: device_name to post the event with\n:type device_name: list", "id": "f9179:c0:m4"} {"signature": "def send_metric(self, metric_name, datapoint, tags=None, type_=None, interval=None):", "body": "response = api.Metric.send(metric=metric_name,points=datapoint,host=self.host,tags=tags,type=type_,interval=interval)self.validate_response(response)return response", "docstring": "Sends a single datapoint metric to DataDog\n\n:param metric_name: The name of the metric\n:type metric_name: str\n:param datapoint: A single integer or float related to the metric\n:type datapoint: int or float\n:param tags: A list of tags associated with the metric\n:type tags: list\n:param type_: Type of your metric: gauge, rate, or count\n:type type_: str\n:param interval: If the type of the metric is rate or count, define the corresponding interval\n:type interval: int", "id": "f9179:c0:m2"} {"signature": "@GoogleCloudBaseHook.catch_http_exceptiondef classify_text(self, document, retry=None, timeout=None, metadata=None):", "body": "client = self.get_conn()return client.classify_text(document=document, retry=retry, timeout=timeout, metadata=metadata)", "docstring": "Classifies a document into categories.\n\n:param document: Input document.\n If a dict is provided, it must be of the same form as the protobuf message Document\n:type document: dict or class google.cloud.language_v1.types.Document\n:param retry: A retry object used to retry requests. If None is specified, requests will not be\n retried.\n:type retry: google.api_core.retry.Retry\n:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if\n retry is specified, the timeout applies to each individual attempt.\n:type timeout: float\n:param metadata: Additional metadata that is provided to the method.\n:type metadata: sequence[tuple[str, str]]]\n:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse", "id": "f9180:c0:m7"} {"signature": "@GoogleCloudBaseHook.catch_http_exceptiondef analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):", "body": "client = self.get_conn()return client.analyze_entities(document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata)", "docstring": "Finds named entities in the text along with entity types,\nsalience, mentions for each entity, and other properties.\n\n:param document: Input document.\n If a dict is provided, it must be of the same form as the protobuf message Document\n:type document: dict or class google.cloud.language_v1.types.Document\n:param encoding_type: The encoding type used by the API to calculate offsets.\n:type encoding_type: google.cloud.language_v1.types.EncodingType\n:param retry: A retry object used to retry requests. If None is specified, requests will not be\n retried.\n:type retry: google.api_core.retry.Retry\n:param timeout: The amount of time, in seconds, to wait for the request to complete. 
Note that if\n retry is specified, the timeout applies to each individual attempt.\n:type timeout: float\n:param metadata: Additional metadata that is provided to the method.\n:type metadata: sequence[tuple[str, str]]]\n:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse", "id": "f9180:c0:m2"} {"signature": "@GoogleCloudBaseHook.catch_http_exceptiondef annotate_text(self, document, features, encoding_type=None, retry=None, timeout=None, metadata=None):", "body": "client = self.get_conn()return client.annotate_text(document=document,features=features,encoding_type=encoding_type,retry=retry,timeout=timeout,metadata=metadata,)", "docstring": "A convenience method that provides all the features that analyzeSentiment,\nanalyzeEntities, and analyzeSyntax provide in one call.\n\n:param document: Input document.\n If a dict is provided, it must be of the same form as the protobuf message Document\n:type document: dict or google.cloud.language_v1.types.Document\n:param features: The enabled features.\n If a dict is provided, it must be of the same form as the protobuf message Features\n:type features: dict or google.cloud.language_v1.enums.Features\n:param encoding_type: The encoding type used by the API to calculate offsets.\n:type encoding_type: google.cloud.language_v1.types.EncodingType\n:param retry: A retry object used to retry requests. If None is specified, requests will not be\n retried.\n:type retry: google.api_core.retry.Retry\n:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if\n retry is specified, the timeout applies to each individual attempt.\n:type timeout: float\n:param metadata: Additional metadata that is provided to the method.\n:type metadata: sequence[tuple[str, str]]]\n:rtype: google.cloud.language_v1.types.AnnotateTextResponse", "id": "f9180:c0:m6"} {"signature": "@GoogleCloudBaseHook.catch_http_exceptiondef analyze_entity_sentiment(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):", "body": "client = self.get_conn()return client.analyze_entity_sentiment(document=document, encoding_type=encoding_type, retry=retry, timeout=timeout, metadata=metadata)", "docstring": "Finds entities, similar to AnalyzeEntities in the text and analyzes sentiment associated with each\nentity and its mentions.\n\n:param document: Input document.\n If a dict is provided, it must be of the same form as the protobuf message Document\n:type document: dict or class google.cloud.language_v1.types.Document\n:param encoding_type: The encoding type used by the API to calculate offsets.\n:type encoding_type: google.cloud.language_v1.types.EncodingType\n:param retry: A retry object used to retry requests. If None is specified, requests will not be\n retried.\n:type retry: google.api_core.retry.Retry\n:param timeout: The amount of time, in seconds, to wait for the request to complete. 
Note that if\n retry is specified, the timeout applies to each individual attempt.\n:type timeout: float\n:param metadata: Additional metadata that is provided to the method.\n:type metadata: sequence[tuple[str, str]]]\n:rtype: google.cloud.language_v1.types.AnalyzeEntitiesResponse", "id": "f9180:c0:m3"} {"signature": "def get_conn(self):", "body": "self.conn = self.get_client_type('', self.region_name)return self.conn", "docstring": "Returns AwsHook connection object.", "id": "f9182:c0:m1"} {"signature": "@GoogleCloudBaseHook.catch_http_exception@GoogleCloudBaseHook.fallback_to_default_project_iddef remove_product_from_product_set(self,product_set_id,product_id,location=None,project_id=None,retry=None,timeout=None,metadata=None,):", "body": "client = self.get_conn()product_name = ProductSearchClient.product_path(project_id, location, product_id)product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)self.log.info('', product_name, product_set_name)client.remove_product_from_product_set(name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata)self.log.info('')", "docstring": "For the documentation see:\n:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionRemoveProductFromProductSetOperator`", "id": "f9183:c1:m15"} {"signature": "@GoogleCloudBaseHook.catch_http_exception@GoogleCloudBaseHook.fallback_to_default_project_iddef add_product_to_product_set(self,product_set_id,product_id,location=None,project_id=None,retry=None,timeout=None,metadata=None,):", "body": "client = self.get_conn()product_name = ProductSearchClient.product_path(project_id, location, product_id)product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)self.log.info('', product_name, product_set_name)client.add_product_to_product_set(name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata)self.log.info('')", "docstring": "For the documentation see:\n:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionAddProductToProductSetOperator`", "id": "f9183:c1:m14"} {"signature": "@GoogleCloudBaseHook.catch_http_exception@GoogleCloudBaseHook.fallback_to_default_project_iddef get_product(self, location, product_id, project_id=None, retry=None, timeout=None, metadata=None):", "body": "client = self.get_conn()name = ProductSearchClient.product_path(project_id, location, product_id)self.log.info('', name)response = client.get_product(name=name, retry=retry, timeout=timeout, metadata=metadata)self.log.info('')self.log.debug('', response)return MessageToDict(response)", "docstring": "For the documentation see:\n:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductGetOperator`", "id": "f9183:c1:m9"} {"signature": "@GoogleCloudBaseHook.catch_http_exceptiondef text_detection(self, image, max_results=None, retry=None, timeout=None, additional_properties=None):", "body": "client = self.annotator_clientself.log.info(\"\")if additional_properties is None:additional_properties = {}response = client.text_detection(image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties)response = MessageToDict(response)self._check_for_error(response)self.log.info(\"\")return response", "docstring": "For the documentation see:\n:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectTextOperator`", "id": "f9183:c1:m17"} {"signature": 
"@GoogleCloudBaseHook.catch_http_exception@GoogleCloudBaseHook.fallback_to_default_project_iddef create_reference_image(self,location,product_id,reference_image,reference_image_id=None,project_id=None,retry=None,timeout=None,metadata=None,):", "body": "client = self.get_conn()self.log.info('')parent = ProductSearchClient.product_path(project=project_id, location=location, product=product_id)response = client.create_reference_image(parent=parent,reference_image=reference_image,reference_image_id=reference_image_id,retry=retry,timeout=timeout,metadata=metadata,)self.log.info('', response.name if response else '')self.log.debug('', response)if not reference_image_id:reference_image_id = self._get_autogenerated_id(response)self.log.info('', reference_image_id)return reference_image_id", "docstring": "For the documentation see:\n:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`", "id": "f9183:c1:m12"} {"signature": "@GoogleCloudBaseHook.catch_http_exception@GoogleCloudBaseHook.fallback_to_default_project_iddef update_product(self,product,location=None,product_id=None,update_mask=None,project_id=None,retry=None,timeout=None,metadata=None,):", "body": "client = self.get_conn()product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id)self.log.info('', product.name)response = client.update_product(product=product, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata)self.log.info('', response.name if response else '')self.log.debug('', response)return MessageToDict(response)", "docstring": "For the documentation see:\n:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`", "id": "f9183:c1:m10"} {"signature": "@GoogleCloudBaseHook.catch_http_exceptiondef annotate_image(self, request, retry=None, timeout=None):", "body": "client = self.annotator_clientself.log.info('')response = client.annotate_image(request=request, retry=retry, timeout=timeout)self.log.info('')return MessageToDict(response)", "docstring": "For the documentation see:\n:py:class:`~airflow.contrib.operators.gcp_vision_image_annotator_operator.CloudVisionAnnotateImage`", "id": "f9183:c1:m16"} {"signature": "@GoogleCloudBaseHook.fallback_to_default_project_iddef delete_table(self, instance_id, table_id, project_id=None):", "body": "table = self.get_instance(instance_id=instance_id, project_id=project_id).table(table_id=table_id)table.delete()", "docstring": "Deletes the specified table in Cloud Bigtable.\nRaises google.api_core.exceptions.NotFound if the table does not exist.\n\n:type instance_id: str\n:param instance_id: The ID of the Cloud Bigtable instance.\n:type table_id: str\n:param table_id: The ID of the table in Cloud Bigtable.\n:type project_id: str\n:param project_id: Optional, Google Cloud Platform project ID where the\n BigTable exists. 
If set to None or missing,\n the default project_id from the GCP connection is used.", "id": "f9184:c0:m6"} {"signature": "@staticmethoddef get_cluster_states_for_table(instance, table_id):", "body": "table = Table(table_id, instance)return table.get_cluster_states()", "docstring": "Fetches Cluster States for the specified table in Cloud Bigtable.\nRaises google.api_core.exceptions.NotFound if the table does not exist.\n\n:type instance: Instance\n:param instance: The Cloud Bigtable instance that owns the table.\n:type table_id: str\n:param table_id: The ID of the table in Cloud Bigtable to fetch Cluster States\n from.", "id": "f9184:c0:m9"} {"signature": "@GoogleCloudBaseHook.fallback_to_default_project_iddef get_instance(self, instance_id, project_id=None):", "body": "instance = self._get_client(project_id=project_id).instance(instance_id)if not instance.exists():return Nonereturn instance", "docstring": "Retrieves and returns the specified Cloud Bigtable instance if it exists.\nOtherwise, returns None.\n\n:param instance_id: The ID of the Cloud Bigtable instance.\n:type instance_id: str\n:param project_id: Optional, Google Cloud Platform project ID where the\n BigTable exists. If set to None or missing,\n the default project_id from the GCP connection is used.\n:type project_id: str", "id": "f9184:c0:m2"} {"signature": "def download_file(self, local_path, remote_path, nthreads=, overwrite=True,buffersize=, blocksize=):", "body": "multithread.ADLDownloader(self.connection,lpath=local_path,rpath=remote_path,nthreads=nthreads,overwrite=overwrite,buffersize=buffersize,blocksize=blocksize)", "docstring": "Download a file from Azure Blob Storage.\n\n:param local_path: local path. If downloading a single file, will write to this\n specific file, unless it is an existing directory, in which case a file is\n created within it. If downloading multiple files, this is the root\n directory to write within. Will create directories as required.\n:type local_path: str\n:param remote_path: remote path/globstring to use to find remote files.\n Recursive glob patterns using `**` are not supported.\n:type remote_path: str\n:param nthreads: Number of threads to use. If None, uses the number of cores.\n:type nthreads: int\n:param overwrite: Whether to forcibly overwrite existing files/directories.\n If False and remote path is a directory, will quit regardless if any files\n would be overwritten or not. If True, only matching filenames are actually\n overwritten.\n:type overwrite: bool\n:param buffersize: int [2**22]\n Number of bytes for internal buffer. This block cannot be bigger than\n a chunk and cannot be smaller than a block.\n:type buffersize: int\n:param blocksize: int [2**22]\n Number of bytes for a block. Within each chunk, we write a smaller\n block for each API call. 
This block cannot be bigger than a chunk.\n:type blocksize: int", "id": "f9186:c0:m4"} {"signature": "def table_exists(self, table):", "body": "keyspace = self.keyspaceif '' in table:keyspace, table = table.split('', )cluster_metadata = self.get_conn().cluster.metadatareturn (keyspace in cluster_metadata.keyspaces andtable in cluster_metadata.keyspaces[keyspace].tables)", "docstring": "Checks if a table exists in Cassandra\n\n:param table: Target Cassandra table.\n Use dot notation to target a specific keyspace.\n:type table: str", "id": "f9187:c0:m5"} {"signature": "def get_state_exitcode_details(self, resource_group, name):", "body": "current_state = self._get_instance_view(resource_group, name).current_statereturn (current_state.state,current_state.exit_code,current_state.detail_status)", "docstring": "Get the state and exitcode of a container group\n\n:param resource_group: the name of the resource group\n:type resource_group: str\n:param name: the name of the container group\n:type name: str\n:return: A tuple with the state, exitcode, and details.\n If the exitcode is unknown 0 is returned.\n:rtype: tuple(state,exitcode,details)", "id": "f9188:c0:m3"} {"signature": "def exists(self, resource_group, name):", "body": "for container in self.connection.container_groups.list_by_resource_group(resource_group):if container.name == name:return Truereturn False", "docstring": "Test if a container group exists\n\n:param resource_group: the name of the resource group\n:type resource_group: str\n:param name: the name of the container group\n:type name: str", "id": "f9188:c0:m8"} {"signature": "@staticmethoddef _full_location(project_id, location):", "body": "return ''.format(project_id, location)", "docstring": "Retrieve full location of the function in the form of\nprojects//locations/\n\n:param project_id: The Google Cloud Project project_id where the function belongs.\n:type project_id: str\n:param location: The location where the function is created.\n:type location: str\n:return:", "id": "f9189:c0:m1"} {"signature": "def get_function(self, name):", "body": "return self.get_conn().projects().locations().functions().get(name=name).execute(num_retries=self.num_retries)", "docstring": "Returns the Cloud Function with the given name.\n\n:param name: Name of the function.\n:type name: str\n:return: A Cloud Functions object representing the function.\n:rtype: dict", "id": "f9189:c0:m3"} {"signature": "def _wait_for_operation_to_complete(self, operation_name):", "body": "service = self.get_conn()while True:operation_response = service.operations().get(name=operation_name,).execute(num_retries=self.num_retries)if operation_response.get(\"\"):response = operation_response.get(\"\")error = operation_response.get(\"\")if error:raise AirflowException(str(error))return responsetime.sleep(TIME_TO_SLEEP_IN_SECONDS)", "docstring": "Waits for the named operation to complete - checks status of the\nasynchronous call.\n\n:param operation_name: The name of the operation.\n:type operation_name: str\n:return: The response returned by the operation.\n:rtype: dict\n:exception: AirflowException in case error is returned.", "id": "f9189:c0:m8"} {"signature": "def update_function(self, name, body, update_mask):", "body": "response = self.get_conn().projects().locations().functions().patch(updateMask=\"\".join(update_mask),name=name,body=body).execute(num_retries=self.num_retries)operation_name = response[\"\"]self._wait_for_operation_to_complete(operation_name=operation_name)", "docstring": "Updates Cloud Functions 
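The Cassandra table_exists record above accepts either a bare table name or keyspace-qualified dot notation. A small standalone sketch of that parsing step, assuming "." as the separator and a hook-level default keyspace; the function name here is illustrative, not the hook's API.

```python
def split_keyspace_table(target, default_keyspace):
    """Split 'keyspace.table' into its parts, falling back to a default keyspace."""
    if "." in target:
        keyspace, table = target.split(".", 1)
        return keyspace, table
    return default_keyspace, target


assert split_keyspace_table("events", "airflow") == ("airflow", "events")
assert split_keyspace_table("metrics.events", "airflow") == ("metrics", "events")
```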
according to the specified update mask.\n\n:param name: The name of the function.\n:type name: str\n:param body: The body required by the cloud function patch API.\n:type body: dict\n:param update_mask: The update mask - array of fields that should be patched.\n:type update_mask: [str]\n:return: None", "id": "f9189:c0:m5"} {"signature": "def insert_object_acl(self, bucket_name, object_name, entity, role, user_project=None):", "body": "self.log.info('',object_name, bucket_name)client = self.get_conn()bucket = client.bucket(bucket_name=bucket_name)blob = bucket.blob(object_name)blob.acl.reload()blob.acl.entity_from_dict(entity_dict={\"\": entity, \"\": role})if user_project:blob.acl.user_project = user_projectblob.acl.save()self.log.info('',object_name, bucket_name)", "docstring": "Creates a new ACL entry on the specified object.\nSee: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/insert\n\n:param bucket_name: Name of a bucket_name.\n:type bucket_name: str\n:param object_name: Name of the object. For information about how to URL encode\n object names to be path safe, see:\n https://cloud.google.com/storage/docs/json_api/#encoding\n:type object_name: str\n:param entity: The entity holding the permission, in one of the following forms:\n user-userId, user-email, group-groupId, group-email, domain-domain,\n project-team-projectId, allUsers, allAuthenticatedUsers\n See: https://cloud.google.com/storage/docs/access-control/lists#scopes\n:type entity: str\n:param role: The access permission for the entity.\n Acceptable values are: \"OWNER\", \"READER\".\n:type role: str\n:param user_project: (Optional) The project to be billed for this request.\n Required for Requester Pays buckets.\n:type user_project: str", "id": "f9190:c0:m15"} {"signature": "def upload(self, bucket_name, object_name, filename,mime_type='', gzip=False):", "body": "if gzip:filename_gz = filename + ''with open(filename, '') as f_in:with gz.open(filename_gz, '') as f_out:shutil.copyfileobj(f_in, f_out)filename = filename_gzclient = self.get_conn()bucket = client.get_bucket(bucket_name=bucket_name)blob = bucket.blob(blob_name=object_name)blob.upload_from_filename(filename=filename,content_type=mime_type)if gzip:os.remove(filename)self.log.info('', filename, object_name, bucket_name)", "docstring": "Uploads a local file to Google Cloud Storage.\n\n:param bucket_name: The bucket to upload to.\n:type bucket_name: str\n:param object_name: The object name to set when uploading the local file.\n:type object_name: str\n:param filename: The local file path to the file to be uploaded.\n:type filename: str\n:param mime_type: The MIME type to set when uploading the file.\n:type mime_type: str\n:param gzip: Option to compress file for upload\n:type gzip: bool", "id": "f9190:c0:m5"} {"signature": "def download(self, bucket_name, object_name, filename=None):", "body": "client = self.get_conn()bucket = client.get_bucket(bucket_name)blob = bucket.blob(blob_name=object_name)if filename:blob.download_to_filename(filename)self.log.info('', filename)return blob.download_as_string()", "docstring": "Get a file from Google Cloud Storage.\n\n:param bucket_name: The bucket to fetch from.\n:type bucket_name: str\n:param object_name: The object to fetch.\n:type object_name: str\n:param filename: If set, a local file path where the file should be written to.\n:type filename: str", "id": "f9190:c0:m4"} {"signature": "def export_table(self, table, export_dir, input_null_string,input_null_non_string, staging_table,clear_staging_table, 
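The GCS upload record above optionally gzips the local file before uploading and deletes the temporary archive afterwards. A minimal, hook-independent sketch of that compression step using only the standard library; names are illustrative.

```python
import gzip
import shutil


def gzip_for_upload(filename):
    """Compress filename into filename + '.gz' and return the new path.

    The caller uploads the returned path and may remove it afterwards,
    mirroring the behaviour described in the upload record.
    """
    gz_path = filename + ".gz"
    with open(filename, "rb") as f_in, gzip.open(gz_path, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
    return gz_path
```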
enclosed_by,escaped_by, input_fields_terminated_by,input_lines_terminated_by,input_optionally_enclosed_by, batch,relaxed_isolation, extra_export_options=None):", "body": "cmd = self._export_cmd(table, export_dir, input_null_string,input_null_non_string, staging_table,clear_staging_table, enclosed_by, escaped_by,input_fields_terminated_by,input_lines_terminated_by,input_optionally_enclosed_by, batch,relaxed_isolation, extra_export_options)self.Popen(cmd)", "docstring": "Exports Hive table to remote location. Arguments are copies of direct\nsqoop command line Arguments\n\n:param table: Table remote destination\n:param export_dir: Hive table to export\n:param input_null_string: The string to be interpreted as null for\n string columns\n:param input_null_non_string: The string to be interpreted as null\n for non-string columns\n:param staging_table: The table in which data will be staged before\n being inserted into the destination table\n:param clear_staging_table: Indicate that any data present in the\n staging table can be deleted\n:param enclosed_by: Sets a required field enclosing character\n:param escaped_by: Sets the escape character\n:param input_fields_terminated_by: Sets the field separator character\n:param input_lines_terminated_by: Sets the end-of-line character\n:param input_optionally_enclosed_by: Sets a field enclosing character\n:param batch: Use batch mode for underlying statement execution\n:param relaxed_isolation: Transaction isolation to read uncommitted\n for the mappers\n:param extra_export_options: Extra export options to pass as dict.\n If a key doesn't have a value, just pass an empty string to it.\n Don't include prefix of -- for sqoop options.", "id": "f9195:c0:m10"} {"signature": "def get_conn(self):", "body": "conn = self.get_connection(self.cloudant_conn_id)self._validate_connection(conn)cloudant_session = cloudant(user=conn.login, passwd=conn.password, account=conn.host)return cloudant_session", "docstring": "Opens a connection to the cloudant service and closes it automatically if used as context manager.\n\n.. 
note::\n In the connection form:\n - 'host' equals the 'Account' (optional)\n - 'login' equals the 'Username (or API Key)' (required)\n - 'password' equals the 'Password' (required)\n\n:return: an authorized cloudant session context manager object.\n:rtype: cloudant", "id": "f9197:c0:m1"} {"signature": "def run_next(self, next_job):", "body": "self.log.info('', str(next_job))key, command, kube_executor_config = next_jobdag_id, task_id, execution_date, try_number = keyself.log.debug(\"\", command)self.log.debug(\"\", self.kube_config.kube_image)pod = self.worker_configuration.make_pod(namespace=self.namespace, worker_uuid=self.worker_uuid,pod_id=self._create_pod_id(dag_id, task_id),dag_id=self._make_safe_label_value(dag_id),task_id=self._make_safe_label_value(task_id),try_number=try_number,execution_date=self._datetime_to_label_safe_datestring(execution_date),airflow_command=command, kube_executor_config=kube_executor_config)self.launcher.run_pod_async(pod)self.log.debug(\"\")", "docstring": "The run_next command will check the task_queue for any un-run jobs.\nIt will then create a unique job-id, launch that job in the cluster,\nand store relevant info in the current_jobs map so we can track the job's\nstatus", "id": "f9229:c3:m3"} {"signature": "@staticmethoddef _make_safe_pod_id(safe_dag_id, safe_task_id, safe_uuid):", "body": "MAX_POD_ID_LEN = safe_key = safe_dag_id + safe_task_idsafe_pod_id = safe_key[:MAX_POD_ID_LEN - len(safe_uuid) - ] + \"\" + safe_uuidreturn safe_pod_id", "docstring": "Kubernetes pod names must be <= 253 chars and must pass the following regex for\nvalidation\n\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\n\n:param safe_dag_id: a dag_id with only alphanumeric characters\n:param safe_task_id: a task_id with only alphanumeric characters\n:param random_uuid: a uuid\n:return: ``str`` valid Pod name of appropriate length", "id": "f9229:c3:m8"} {"signature": "@staticmethoddef _make_safe_label_value(string):", "body": "MAX_LABEL_LEN = safe_label = re.sub(r'', '', string)if len(safe_label) > MAX_LABEL_LEN or string != safe_label:safe_hash = hashlib.md5(string.encode()).hexdigest()[:]safe_label = safe_label[:MAX_LABEL_LEN - len(safe_hash) - ] + \"\" + safe_hashreturn safe_label", "docstring": "Valid label values must be 63 characters or less and must be empty or begin and\nend with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),\ndots (.), and alphanumerics between.\n\nIf the label value is then greater than 63 chars once made safe, or differs in any\nway from the original value sent to this function, then we need to truncate to\n53chars, and append it with a unique hash.", "id": "f9229:c3:m9"} {"signature": "def execute(self, context):", "body": "super().execute(context)return self._messages", "docstring": "Overridden to allow messages to be passed", "id": "f9231:c0:m1"} {"signature": "def is_bucket_updated(self, current_num_objects):", "body": "if current_num_objects > self.previous_num_objects:self.log.info(''''''.format(os.path.join(self.bucket, self.prefix)))self.last_activity_time = get_time()self.inactivity_seconds = self.previous_num_objects = current_num_objectselif current_num_objects < self.previous_num_objects:if self.allow_delete:self.previous_num_objects = current_num_objectsself.last_activity_time = get_time()self.log.warning('''''')else:raise RuntimeError(''''''.format(os.path.join(self.bucket, self.prefix)))else:if self.last_activity_time:self.inactivity_seconds = (get_time() - 
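The _make_safe_label_value record above documents the Kubernetes label rules: at most 63 characters, and a hash suffix whenever sanitization changed or truncated the value. The sketch below follows the strategy the docstring describes (replace disallowed characters, then truncate and append an md5 fragment); the replacement character, regex, and hash length are assumptions, not the executor's exact constants.

```python
import hashlib
import re

MAX_LABEL_LEN = 63  # Kubernetes label value limit


def safe_label_value(value):
    """Return a label-safe version of value, hashing when it had to be altered."""
    safe = re.sub(r"[^a-zA-Z0-9._-]", "-", value)
    if len(safe) > MAX_LABEL_LEN or safe != value:
        digest = hashlib.md5(value.encode()).hexdigest()[:9]  # assumed fragment length
        safe = safe[:MAX_LABEL_LEN - len(digest) - 1] + "-" + digest
    return safe


print(safe_label_value("my_dag/with:odd*chars"))
```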
self.last_activity_time).total_seconds()else:self.last_activity_time = get_time()self.inactivity_seconds = if self.inactivity_seconds >= self.inactivity_period:if current_num_objects >= self.min_objects:self.log.info(''''''.format(current_num_objects,os.path.join(self.bucket, self.prefix),self.inactivity_period))return Truewarn_msg =''''''.format(os.path.join(self.bucket, self.prefix))self.log.warning(warn_msg)return Falsereturn False", "docstring": "Checks whether new objects have been uploaded and the inactivity_period\nhas passed and updates the state of the sensor accordingly.\n\n:param current_num_objects: number of objects in bucket during last poke.\n:type current_num_objects: int", "id": "f9232:c3:m1"} {"signature": "def _create_hook(self):", "body": "return FTPSHook(ftp_conn_id=self.ftp_conn_id)", "docstring": "Return connection hook.", "id": "f9235:c1:m0"} {"signature": "def _get_error_code(self, e):", "body": "try:matches = self.error_code_pattern.match(str(e))code = int(matches.group())return codeexcept ValueError:return e", "docstring": "Extract error code from ftp exception", "id": "f9235:c0:m2"} {"signature": "def poke(self, context):", "body": "self.log.info('', self.attachment_name)with ImapHook(imap_conn_id=self.conn_id) as imap_hook:return imap_hook.has_mail_attachment(name=self.attachment_name,mail_folder=self.mail_folder,check_regex=self.check_regex)", "docstring": "Pokes for a mail attachment on the mail server.\n\n:param context: The context that is being provided when poking.\n:type context: dict\n:return: True if attachment with the given name is present and False if not.\n:rtype: bool", "id": "f9239:c0:m1"} {"signature": "def poke(self, context):", "body": "sqs_hook = SQSHook(aws_conn_id=self.aws_conn_id)sqs_conn = sqs_hook.get_conn()self.log.info('', self.sqs_queue)messages = sqs_conn.receive_message(QueueUrl=self.sqs_queue,MaxNumberOfMessages=self.max_messages,WaitTimeSeconds=self.wait_time_seconds)self.log.info(\"\", str(messages))if '' in messages and len(messages['']) > :entries = [{'': message[''], '': message['']}for message in messages['']]result = sqs_conn.delete_message_batch(QueueUrl=self.sqs_queue,Entries=entries)if '' in result:context[''].xcom_push(key='', value=messages)return Trueelse:raise AirflowException('' + str(result) + '' + str(messages))return False", "docstring": "Check for message on subscribed queue and write to xcom the message with key ``messages``\n\n:param context: the context object\n:type context: dict\n:return: ``True`` if message is available or ``False``", "id": "f9259:c0:m1"} {"signature": "def poke(self, context):", "body": "bash_command = self.bash_commandself.log.info(\"\", gettempdir())with TemporaryDirectory(prefix='') as tmp_dir:with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:f.write(bytes(bash_command, ''))f.flush()fname = f.namescript_location = tmp_dir + \"\" + fnameself.log.info(\"\", script_location)self.log.info(\"\", bash_command)sp = Popen(['', fname],stdout=PIPE, stderr=STDOUT,close_fds=True, cwd=tmp_dir,env=self.env, preexec_fn=os.setsid)self.sp = spself.log.info(\"\")line = ''for line in iter(sp.stdout.readline, b''):line = line.decode(self.output_encoding).strip()self.log.info(line)sp.wait()self.log.info(\"\", sp.returncode)return not sp.returncode", "docstring": "Execute the bash command in a temporary directory\nwhich will be cleaned afterwards", "id": "f9262:c0:m1"} {"signature": "def _get_field(self, extras, field, default=None):", "body": "long_f = ''.format(field)if long_f in extras:return 
extras[long_f]else:self.log.info('', field)return default", "docstring": "Fetches a field from extras, and returns it. This is some Airflow\nmagic. The google_cloud_platform hook type adds custom UI elements\nto the hook page, which allow admins to specify service_account,\nkey_path, etc. They get formatted as shown below.", "id": "f9266:c2:m3"} {"signature": "def execute(self, context):", "body": "hook = WasbHook(wasb_conn_id=self.wasb_conn_id)self.log.info(''''.format(self.file_path, self.container_name, self.blob_name))hook.load_file(self.file_path, self.container_name,self.blob_name, **self.load_options)", "docstring": "Upload a file to Azure Blob Storage.", "id": "f9274:c0:m1"} {"signature": "@staticmethoddef _convert_date_to_dict(field_date):", "body": "return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}", "docstring": "Convert native python ``datetime.date`` object to a format supported by the API", "id": "f9287:c0:m6"} {"signature": "def _normalize_mlengine_job_id(job_id):", "body": "match = re.search(r'', job_id)if match and match.start() == :job = ''.format(job_id)else:job = job_idtracker = cleansed_job_id = ''for m in re.finditer(r'', job):cleansed_job_id += re.sub(r'', '',job[tracker:m.start()])cleansed_job_id += job[m.start():m.end()]tracker = m.end()cleansed_job_id += re.sub(r'', '', job[tracker:])return cleansed_job_id", "docstring": "Replaces invalid MLEngine job_id characters with '_'.\n\nThis also adds a leading 'z' in case job_id starts with an invalid\ncharacter.\n\nArgs:\n job_id: A job_id str that may have invalid characters.\n\nReturns:\n A valid job_id representation.", "id": "f9297:m0"} {"signature": "def execute(self, context):", "body": "self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)self.hook.execute(self._build_opsgenie_payload())", "docstring": "Call the OpsgenieAlertHook to post message", "id": "f9302:c0:m2"} {"signature": "def prepare_additional_parameters(additional_properties, language_hints, web_detection_params):", "body": "if language_hints is None and web_detection_params is None:return additional_propertiesif additional_properties is None:return {}merged_additional_parameters = deepcopy(additional_properties)if '' not in merged_additional_parameters:merged_additional_parameters[''] = {}merged_additional_parameters[''][''] = merged_additional_parameters[''].get('', language_hints)merged_additional_parameters[''][''] = merged_additional_parameters[''].get('', web_detection_params)return merged_additional_parameters", "docstring": "Creates additional_properties parameter based on language_hints, web_detection_params and\nadditional_properties parameters specified by the user", "id": "f9315:m0"} {"signature": "def _wait_for_task_ended(self):", "body": "try:waiter = self.client.get_waiter('')waiter.config.max_attempts = sys.maxsize waiter.wait(jobs=[self.jobId])except ValueError:retry = Trueretries = while retries < self.max_retries and retry:self.log.info('', retries)response = self.client.describe_jobs(jobs=[self.jobId])if response[''][-][''] in ['', '']:retry = Falsesleep( + pow(retries * , ))retries += ", "docstring": "Try to use a waiter from the below pull request\n\n * https://github.com/boto/botocore/pull/1307\n\nIf the waiter is not available apply a exponential backoff\n\n * docs.aws.amazon.com/general/latest/gr/api-retries.html", "id": "f9317:c0:m2"} {"signature": "@apply_defaultsdef 
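The _normalize_mlengine_job_id record above replaces characters MLEngine does not accept with underscores and adds a leading "z" when the ID starts with an invalid character. A hedged re-implementation of that rule; the allowed character class (letters, digits, underscore, leading letter) is assumed from the docstring rather than copied from the hook.

```python
import re


def normalize_job_id(job_id):
    """Make job_id safe for MLEngine: [a-zA-Z][a-zA-Z0-9_]* (assumed rule)."""
    cleaned = re.sub(r"[^0-9a-zA-Z_]+", "_", job_id)
    if not re.match(r"[a-zA-Z]", cleaned):
        cleaned = "z_" + cleaned  # prefix so the ID starts with a letter
    return cleaned


print(normalize_job_id("2019-05-01_train/run#7"))  # -> z_2019_05_01_train_run_7
```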
__init__(self,job_id,json=None,notebook_params=None,python_params=None,spark_submit_params=None,databricks_conn_id='',polling_period_seconds=,databricks_retry_limit=,databricks_retry_delay=,do_xcom_push=False,**kwargs):", "body": "super().__init__(**kwargs)self.json = json or {}self.databricks_conn_id = databricks_conn_idself.polling_period_seconds = polling_period_secondsself.databricks_retry_limit = databricks_retry_limitself.databricks_retry_delay = databricks_retry_delayif job_id is not None:self.json[''] = job_idif notebook_params is not None:self.json[''] = notebook_paramsif python_params is not None:self.json[''] = python_paramsif spark_submit_params is not None:self.json[''] = spark_submit_paramsself.json = _deep_string_coerce(self.json)self.run_id = Noneself.do_xcom_push = do_xcom_push", "docstring": "Creates a new ``DatabricksRunNowOperator``.", "id": "f9320:c1:m0"} {"signature": "def execute(self, context):", "body": "self.log.info('',self.imap_attachment_name, self.s3_key)with ImapHook(imap_conn_id=self.imap_conn_id) as imap_hook:imap_mail_attachments = imap_hook.retrieve_mail_attachments(name=self.imap_attachment_name,mail_folder=self.imap_mail_folder,check_regex=self.imap_check_regex,latest_only=True)s3_hook = S3Hook(aws_conn_id=self.s3_conn_id)s3_hook.load_bytes(bytes_data=imap_mail_attachments[][], key=self.s3_key)", "docstring": "This function executes the transfer from the email server (via imap) into s3.\n\n:param context: The context while executing.\n:type context: dict", "id": "f9328:c0:m1"} {"signature": "def _make_intermediate_dirs(sftp_client, remote_directory):", "body": "if remote_directory == '':sftp_client.chdir('')returnif remote_directory == '':returntry:sftp_client.chdir(remote_directory)except IOError:dirname, basename = os.path.split(remote_directory.rstrip(''))_make_intermediate_dirs(sftp_client, dirname)sftp_client.mkdir(basename)sftp_client.chdir(basename)return", "docstring": "Create all the intermediate directories in a remote host\n\n:param sftp_client: A Paramiko SFTP client.\n:param remote_directory: Absolute Path of the directory containing the file\n:return:", "id": "f9333:m0"} {"signature": "def google_cloud_to_local(self, file_name):", "body": "if not file_name.startswith(''):return file_namepath_components = file_name[self.GCS_PREFIX_LENGTH:].split('')if len(path_components) < :raise Exception(''.format(file_name))bucket_id = path_components[]object_id = ''.join(path_components[:])local_file = ''.format(str(uuid.uuid4())[:],path_components[-])self._gcs_hook.download(bucket_id, object_id, local_file)if os.stat(local_file).st_size > :return local_fileraise Exception(''.format(file_name))", "docstring": "Checks whether the file specified by file_name is stored in Google Cloud\nStorage (GCS), if so, downloads the file and saves it locally. The full\npath of the saved file will be returned. 
Otherwise the local file_name\nwill be returned immediately.\n\n:param file_name: The full path of input file.\n:type file_name: str\n:return: The full path of local file.\n:rtype: str", "id": "f9334:c3:m1"} {"signature": "def execute(self, context):", "body": "self.hook = SqoopHook(conn_id=self.conn_id,verbose=self.verbose,num_mappers=self.num_mappers,hcatalog_database=self.hcatalog_database,hcatalog_table=self.hcatalog_table,properties=self.properties)if self.cmd_type == '':self.hook.export_table(table=self.table,export_dir=self.export_dir,input_null_string=self.input_null_string,input_null_non_string=self.input_null_non_string,staging_table=self.staging_table,clear_staging_table=self.clear_staging_table,enclosed_by=self.enclosed_by,escaped_by=self.escaped_by,input_fields_terminated_by=self.input_fields_terminated_by,input_lines_terminated_by=self.input_lines_terminated_by,input_optionally_enclosed_by=self.input_optionally_enclosed_by,batch=self.batch,relaxed_isolation=self.relaxed_isolation,extra_export_options=self.extra_export_options)elif self.cmd_type == '':if self.create_hcatalog_table:self.extra_import_options[''] = ''if self.table and self.query:raise AirflowException('')if self.table:self.hook.import_table(table=self.table,target_dir=self.target_dir,append=self.append,file_type=self.file_type,columns=self.columns,split_by=self.split_by,where=self.where,direct=self.direct,driver=self.driver,extra_import_options=self.extra_import_options)elif self.query:self.hook.import_query(query=self.query,target_dir=self.target_dir,append=self.append,file_type=self.file_type,split_by=self.split_by,direct=self.direct,driver=self.driver,extra_import_options=self.extra_import_options)else:raise AirflowException(\"\")else:raise AirflowException(\"\")", "docstring": "Execute sqoop job", "id": "f9338:c0:m1"} {"signature": "def build_job(self, jenkins_server):", "body": "if self.parameters and isinstance(self.parameters, six.string_types):import astself.parameters = ast.literal_eval(self.parameters)if not self.parameters:self.parameters = Nonerequest = Request(jenkins_server.build_job_url(self.job_name,self.parameters, None))return jenkins_request_with_headers(jenkins_server, request)", "docstring": "This function makes an API call to Jenkins to trigger a build for 'job_name'\nIt returned a dict with 2 keys : body and headers.\nheaders contains also a dict-like object which can be queried to get\nthe location to poll in the queue.\n\n:param jenkins_server: The jenkins server where the job should be triggered\n:return: Dict containing the response body (key body)\n and the headers coming along (headers)", "id": "f9344:c0:m1"} {"signature": "def poll_job_in_queue(self, location, jenkins_server):", "body": "try_count = location = location + ''self.log.info('', location)while try_count < self.max_try_before_job_appears:location_answer = jenkins_request_with_headers(jenkins_server,Request(location))if location_answer is not None:json_response = json.loads(location_answer[''])if '' in json_response:build_number = json_response['']['']self.log.info('',build_number)return build_numbertry_count += time.sleep(self.sleep_time)raise AirflowException(\"\"\"\",self.max_try_before_job_appears)", "docstring": "This method poll the jenkins queue until the job is executed.\nWhen we trigger a job through an API call,\nthe job is first put in the queue without having a build number assigned.\nThus we have to wait the job exit the queue to know its build number.\nTo do so, we have to add /api/json (or /api/xml) to the 
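The google_cloud_to_local record above only downloads when the path is a GCS URI, splitting it into a bucket ID and an object path. Below is a small sketch of that parsing using urlparse; the record itself splits on "/" after a fixed prefix, so this is an equivalent alternative based on the general gs:// convention, not the hook's exact code.

```python
from urllib.parse import urlparse


def split_gcs_uri(uri):
    """Split 'gs://bucket/path/to/object' into (bucket, object_path)."""
    parsed = urlparse(uri)
    if parsed.scheme != "gs" or not parsed.netloc or not parsed.path.lstrip("/"):
        raise ValueError("Invalid GCS path: %r" % uri)
    return parsed.netloc, parsed.path.lstrip("/")


assert split_gcs_uri("gs://my-bucket/data/file.json") == ("my-bucket", "data/file.json")
```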
location\nreturned by the build_job call and poll this file.\nWhen a 'executable' block appears in the json, it means the job execution started\nand the field 'number' then contains the build number.\n\n:param location: Location to poll, returned in the header of the build_job call\n:param jenkins_server: The jenkins server to poll\n:return: The build_number corresponding to the triggered job", "id": "f9344:c0:m2"} {"signature": "@staticmethoddef get_s3_key(s3_key):", "body": "parsed_s3_key = urlparse(s3_key)return parsed_s3_key.path.lstrip('')", "docstring": "This parses the correct format for S3 keys\n regardless of how the S3 url is passed.", "id": "f9346:c0:m1"} {"signature": "def _configure_csv_file(self, file_handle, schema):", "body": "csv_writer = csv.writer(file_handle, encoding='',delimiter=self.field_delimiter)csv_writer.writerow(schema)return csv_writer", "docstring": "Configure a csv writer with the file_handle and write schema\n as headers for the new file.", "id": "f9347:c0:m4"} {"signature": "def _get_col_type_dict(self):", "body": "schema = []if isinstance(self.schema, string_types):schema = json.loads(self.schema)elif isinstance(self.schema, list):schema = self.schemaelif self.schema is not None:self.log.warn('''')col_type_dict = {}try:col_type_dict = {col['']: col[''] for col in schema}except KeyError:self.log.warn('''''')return col_type_dict", "docstring": "Return a dict of column name and column type based on self.schema if not None.", "id": "f9347:c0:m8"} {"signature": "@classmethoddef type_map(cls, mssql_type):", "body": "d = {: '',: '',: ''}return d[mssql_type] if mssql_type in d else ''", "docstring": "Helper function that maps from MSSQL fields to BigQuery fields. Used\nwhen a schema_filename is set.", "id": "f9348:c0:m7"} {"signature": "def _write_local_schema_file(self, cursor):", "body": "schema = []for field in cursor.description:field_name = field[].replace('', '') field_type = self.type_map(field[])field_mode = '' schema.append({'': field_name,'': field_type,'': field_mode,})self.log.info('', self.schema_filename, schema)tmp_schema_file_handle = NamedTemporaryFile(delete=True)s = json.dumps(schema, sort_keys=True)s = s.encode('')tmp_schema_file_handle.write(s)return {self.schema_filename: tmp_schema_file_handle}", "docstring": "Takes a cursor, and writes the BigQuery schema for the results to a\nlocal file system.\n\n:return: A dictionary where key is a filename to be used as an object\n name in GCS, and values are file handles to local files that\n contains the BigQuery schema fields in .json format.", "id": "f9348:c0:m4"} {"signature": "def execute(self, context):", "body": "self._hook = SparkSubmitHook(conf=self._conf,conn_id=self._conn_id,files=self._files,py_files=self._py_files,archives=self._archives,driver_class_path=self._driver_class_path,jars=self._jars,java_class=self._java_class,packages=self._packages,exclude_packages=self._exclude_packages,repositories=self._repositories,total_executor_cores=self._total_executor_cores,executor_cores=self._executor_cores,executor_memory=self._executor_memory,driver_memory=self._driver_memory,keytab=self._keytab,principal=self._principal,name=self._name,num_executors=self._num_executors,application_args=self._application_args,env_vars=self._env_vars,verbose=self._verbose,spark_binary=self._spark_binary)self._hook.submit(self._application)", "docstring": "Call the SparkSubmitHook to run the provided spark job", "id": "f9352:c0:m1"} {"signature": "def _query_cassandra(self):", "body": "self.hook = 
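The get_s3_key record above normalizes an S3 URL to just the object key by taking the URL path and stripping the leading slash. A tiny standalone version of that, grounded directly in the urlparse call the record shows; the function name is illustrative.

```python
from urllib.parse import urlparse


def s3_key_from_url(s3_url):
    """Return the object key portion of an s3:// URL (or a bare key unchanged)."""
    return urlparse(s3_url).path.lstrip("/")


assert s3_key_from_url("s3://my-bucket/path/to/file.csv") == "path/to/file.csv"
assert s3_key_from_url("path/to/file.csv") == "path/to/file.csv"
```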
CassandraHook(cassandra_conn_id=self.cassandra_conn_id)session = self.hook.get_conn()cursor = session.execute(self.cql)return cursor", "docstring": "Queries cassandra and returns a cursor to the results.", "id": "f9355:c0:m2"} {"signature": "def data_profiling(self):", "body": "return self.data_profiler", "docstring": "Provides access to data profiling tools", "id": "f9360:c2:m6"} {"signature": "@propertydef is_active(self):", "body": "return True", "docstring": "Required by flask_login", "id": "f9360:c2:m2"} {"signature": "@propertydef is_anonymous(self):", "body": "return False", "docstring": "Required by flask_login", "id": "f9363:c0:m3"} {"signature": "def data_profiling(self):", "body": "return True", "docstring": "Provides access to data profiling tools", "id": "f9363:c0:m5"} {"signature": "def _unauthorized():", "body": "return Response(\"\", , {\"\": \"\"})", "docstring": "Indicate that authorization is required\n:return:", "id": "f9364:m3"} {"signature": "def return_code(self):", "body": "raise NotImplementedError()", "docstring": ":return: The return code associated with running the task instance or\n None if the task is not yet done.\n:rtype: int", "id": "f9365:c0:m4"} {"signature": "@propertydef name(self):", "body": "return getattr(self, '', self.__class__.__name__)", "docstring": "The human-readable name for the dependency. Use the classname as the default name\nif this method is not overridden in the subclass.", "id": "f9373:c0:m4"} {"signature": "def run_migrations_online():", "body": "connectable = settings.enginewith connectable.connect() as connection:context.configure(connection=connection,transaction_per_migration=True,target_metadata=target_metadata,compare_type=COMPARE_TYPE,)with context.begin_transaction():context.run_migrations()", "docstring": "Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.", "id": "f9430:m1"} {"signature": "def run_migrations_offline():", "body": "context.configure(url=settings.SQL_ALCHEMY_CONN, target_metadata=target_metadata,literal_binds=True, compare_type=COMPARE_TYPE)with context.begin_transaction():context.run_migrations()", "docstring": "Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.", "id": "f9430:m0"} {"signature": "def sync_perm_for_dag(self, dag_id, access_control=None):", "body": "for dag_perm in self.DAG_PERMS:perm_on_dag = self.find_permission_view_menu(dag_perm, dag_id)if perm_on_dag is None:self.add_permission_view_menu(dag_perm, dag_id)if access_control:self._sync_dag_view_permissions(dag_id, access_control)", "docstring": "Sync permissions for given dag id. 
The dag id surely exists in our dag bag\nas only / refresh button or cli.sync_perm will call this function\n\n:param dag_id: the ID of the DAG whose permissions should be updated\n:type dag_id: string\n:param access_control: a dict where each key is a rolename and\n each value is a set() of permission names (e.g.,\n {'can_dag_read'}\n:type access_control: dict\n:return:", "id": "f9435:c0:m15"} {"signature": "def _has_perm(self, permission_name, view_menu_name):", "body": "if hasattr(self, ''):if (permission_name, view_menu_name) in self.perms:return Trueself._get_and_cache_perms()return (permission_name, view_menu_name) in self.perms", "docstring": "Whether the user has this perm", "id": "f9435:c0:m8"} {"signature": "def get_accessible_dag_ids(self, username=None):", "body": "if not username:username = g.userif username.is_anonymous or '' in username.roles:return set()roles = {role.name for role in username.roles}if {'', '', '', ''} & roles:return self.DAG_VMSuser_perms_views = self.get_all_permissions_views()return set([view for perm, view in user_perms_views if perm in self.DAG_PERMS])", "docstring": "Return a set of dags that user has access to(either read or write).\n\n:param username: Name of the user.\n:return: A set of dag ids that the user could access.", "id": "f9435:c0:m4"} {"signature": "def has_all_dags_access(self):", "body": "return (self._has_role(['', '', '', '']) orself._has_perm('', '') orself._has_perm('', ''))", "docstring": "Has all the dag access in any of the 3 cases:\n1. Role needs to be in (Admin, Viewer, User, Op).\n2. Has can_dag_read permission on all_dags view.\n3. Has can_dag_edit permission on all_dags view.", "id": "f9435:c0:m9"} {"signature": "def get_all_permissions_views(self):", "body": "perms_views = set()for role in self.get_user_roles():perms_views.update({(perm_view.permission.name, perm_view.view_menu.name)for perm_view in role.permissions})return perms_views", "docstring": "Returns a set of tuples with the perm name and view menu name", "id": "f9435:c0:m3"} {"signature": "def update_admin_perm_view(self):", "body": "pvms = self.get_session.query(sqla_models.PermissionView).all()pvms = [p for p in pvms if p.permission and p.view_menu]admin = self.find_role('')admin.permissions = list(set(admin.permissions) | set(pvms))self.get_session.commit()", "docstring": "Admin should have all the permission-views.\nAdd the missing ones to the table for admin.\n\n:return: None.", "id": "f9435:c0:m13"} {"signature": "def get_user_roles(self, user=None):", "body": "if user is None:user = g.userif user.is_anonymous:public_role = appbuilder.config.get('')return [appbuilder.security_manager.find_role(public_role)]if public_role else []return user.roles", "docstring": "Get all the roles associated with the user.\n\n:param user: the ab_user in FAB model.\n:return: a list of roles associated with the user.", "id": "f9435:c0:m2"} {"signature": "def sync_roles(self):", "body": "self.log.debug('')self.create_perm_vm_for_all_dag()for config in self.ROLE_CONFIGS:role = config['']vms = config['']perms = config['']self.init_role(role, vms, perms)self.create_custom_dag_permission_view()self.update_admin_perm_view()self.clean_perms()", "docstring": "1. Init the default role(Admin, Viewer, User, Op, public)\n with related permissions.\n2. 
Init the custom role(dag-user) with related permissions.\n\n:return: None.", "id": "f9435:c0:m14"} {"signature": "@api_experimental.route('', methods=[''])@requires_authenticationdef task_info(dag_id, task_id):", "body": "try:info = get_task(dag_id, task_id)except AirflowException as err:_log.info(err)response = jsonify(error=\"\".format(err))response.status_code = err.status_codereturn responsefields = {k: str(v)for k, v in vars(info).items()if not k.startswith('')}return jsonify(fields)", "docstring": "Returns a JSON with a task's public instance variables.", "id": "f9437:m5"} {"signature": "@api_experimental.route('', methods=[''])@requires_authenticationdef dag_runs(dag_id):", "body": "try:state = request.args.get('')dagruns = get_dag_runs(dag_id, state)except AirflowException as err:_log.info(err)response = jsonify(error=\"\".format(err))response.status_code = return responsereturn jsonify(dagruns)", "docstring": "Returns a list of Dag Runs for a specific DAG ID.\n:query param state: a query string parameter '?state=queued|running|success...'\n:param dag_id: String identifier of a DAG\n:return: List of DAG runs of a DAG with requested state,\nor all runs if the state is not specified", "id": "f9437:m2"} {"signature": "@csrf.exempt@api_experimental.route('', methods=[''])@requires_authenticationdef delete_dag(dag_id):", "body": "try:count = delete.delete_dag(dag_id)except AirflowException as err:_log.error(err)response = jsonify(error=\"\".format(err))response.status_code = err.status_codereturn responsereturn jsonify(message=\"\".format(count), count=count)", "docstring": "Delete all DB records related to the specified Dag.", "id": "f9437:m1"} {"signature": "@api_experimental.route('',methods=[''])@requires_authenticationdef dag_run_status(dag_id, execution_date):", "body": "try:execution_date = timezone.parse(execution_date)except ValueError:error_message = (''''.format(execution_date))_log.info(error_message)response = jsonify({'': error_message})response.status_code = return responsetry:info = get_dag_run_state(dag_id, execution_date)except AirflowException as err:_log.info(err)response = jsonify(error=\"\".format(err))response.status_code = err.status_codereturn responsereturn jsonify(info)", "docstring": "Returns a JSON with a dag_run's public instance variables.\nThe format for the exec_date is expected to be\n\"YYYY-mm-DDTHH:MM:SS\", for example: \"2016-11-16T11:34:15\". This will\nof course need to have been encoded for URL in the request.", "id": "f9437:m8"} {"signature": "@api_experimental.route('', methods=[''])@requires_authenticationdef dag_paused(dag_id, paused):", "body": "DagModel = models.DagModelwith create_session() as session:orm_dag = (session.query(DagModel).filter(DagModel.dag_id == dag_id).first())if paused == '':orm_dag.is_paused = Trueelse:orm_dag.is_paused = Falsesession.merge(orm_dag)session.commit()return jsonify({'': ''})", "docstring": "(Un)pauses a dag", "id": "f9437:m6"} {"signature": "def make_cache_key(*args, **kwargs):", "body": "path = request.pathargs = str(hash(frozenset(request.args.items())))return (path + args).encode('', '')", "docstring": "Used by cache to get a unique key per URL", "id": "f9440:m6"} {"signature": "def get_chart_height(dag):", "body": "return + len(dag.tasks) * ", "docstring": "TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to\napproximate the size of generated chart (otherwise the charts are tiny and unreadable\nwhen DAGs have a large number of tasks). 
Ideally nvd3 should allow for dynamic-height\ncharts, that is charts that take up space based on the size of the components within.", "id": "f9440:m19"} {"signature": "def epoch(dttm):", "body": "return int(time.mktime(dttm.timetuple())) * ,", "docstring": "Returns an epoch-type date", "id": "f9440:m3"} {"signature": "def gzipped(f):", "body": "@functools.wraps(f)def view_func(*args, **kwargs):@after_this_requestdef zipper(response):accept_encoding = request.headers.get('', '')if '' not in accept_encoding.lower():return responseresponse.direct_passthrough = Falseif (response.status_code < or response.status_code >= or'' in response.headers):return responsegzip_buffer = IO()gzip_file = gzip.GzipFile(mode='',fileobj=gzip_buffer)gzip_file.write(response.data)gzip_file.close()response.data = gzip_buffer.getvalue()response.headers[''] = ''response.headers[''] = ''response.headers[''] = len(response.data)return responsereturn f(*args, **kwargs)return view_func", "docstring": "Decorator to make a view compressed", "id": "f9441:m1"} {"signature": "def configure_manifest_files(app):", "body": "def parse_manifest_json():try:global manifestmanifest_file = os.path.join(os.path.dirname(__file__),'')with open(manifest_file, '') as f:manifest.update(json.load(f))for k in manifest.keys():manifest[k] = os.path.join(\"\", manifest[k])except Exception:print(\"\"\"\")passdef get_asset_url(filename):if app.debug:parse_manifest_json()return url_for('', filename=manifest.get(filename, ''))parse_manifest_json()@app.context_processordef get_url_for_asset():\"\"\"\"\"\"return dict(url_for_asset=get_asset_url)", "docstring": "Loads the manifest file and register the `url_for_asset_` template tag.\n:param app:\n:return:", "id": "f9442:m0"} {"signature": "@provide_sessiondef clear(self,start_date=None,end_date=None,upstream=False,downstream=False,session=None):", "body": "TI = TaskInstanceqry = session.query(TI).filter(TI.dag_id == self.dag_id)if start_date:qry = qry.filter(TI.execution_date >= start_date)if end_date:qry = qry.filter(TI.execution_date <= end_date)tasks = [self.task_id]if upstream:tasks += [t.task_id for t in self.get_flat_relatives(upstream=True)]if downstream:tasks += [t.task_id for t in self.get_flat_relatives(upstream=False)]qry = qry.filter(TI.task_id.in_(tasks))count = qry.count()clear_task_instances(qry.all(), session, dag=self.dag)session.commit()return count", "docstring": "Clears the state of task instances associated with the task, following\nthe parameters specified.", "id": "f9444:c0:m34"} {"signature": "def get_flat_relatives(self, upstream=False):", "body": "return list(map(lambda task_id: self._dag.task_dict[task_id],self.get_flat_relative_ids(upstream)))", "docstring": "Get a flat list of relatives, either upstream or downstream.", "id": "f9444:c0:m37"} {"signature": "@propertydef upstream_list(self):", "body": "return [self.dag.get_task(tid) for tid in self._upstream_task_ids]", "docstring": "@property: list of tasks directly upstream", "id": "f9444:c0:m30"} {"signature": "def on_kill(self):", "body": "pass", "docstring": "Override this method to cleanup subprocesses when a task instance\ngets killed. 
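The epoch record above converts a datetime into an epoch-style timestamp for charting. A minimal sketch of that conversion; since the multiplier in the record is not shown, whether the caller wants seconds or milliseconds is left as a parameter here.

```python
import time
from datetime import datetime


def to_epoch(dttm, milliseconds=False):
    """Convert a naive datetime to a Unix timestamp (seconds, or ms if requested)."""
    seconds = int(time.mktime(dttm.timetuple()))
    return seconds * 1000 if milliseconds else seconds


print(to_epoch(datetime(2019, 1, 1)))
print(to_epoch(datetime(2019, 1, 1), milliseconds=True))
```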
Any use of the threading, subprocess or multiprocessing\nmodule within an operator needs to be cleaned up or it will leave\nghost processes behind.", "id": "f9444:c0:m21"} {"signature": "def get_flat_relative_ids(self, upstream=False, found_descendants=None):", "body": "if not found_descendants:found_descendants = set()relative_ids = self.get_direct_relative_ids(upstream)for relative_id in relative_ids:if relative_id not in found_descendants:found_descendants.add(relative_id)relative_task = self._dag.task_dict[relative_id]relative_task.get_flat_relative_ids(upstream,found_descendants)return found_descendants", "docstring": "Get a flat list of relatives' ids, either upstream or downstream.", "id": "f9444:c0:m36"} {"signature": "@provide_sessiondef get_task_instances(self, start_date=None, end_date=None, session=None):", "body": "end_date = end_date or timezone.utcnow()return session.query(TaskInstance).filter(TaskInstance.dag_id == self.dag_id).filter(TaskInstance.task_id == self.task_id).filter(TaskInstance.execution_date >= start_date).filter(TaskInstance.execution_date <= end_date).order_by(TaskInstance.execution_date).all()", "docstring": "Get a set of task instance related to this task for a specific date\nrange.", "id": "f9444:c0:m35"} {"signature": "@abstractmethoddef get_link(self, operator, dttm):", "body": "pass", "docstring": "Link to external system.\n\n:param operator: airflow operator\n:param dttm: datetime\n:return: link to external system", "id": "f9444:c1:m1"} {"signature": "def execute(self, context):", "body": "raise NotImplementedError()", "docstring": "This is the main method to derive when creating an operator.\nContext is the same dictionary used as when rendering jinja templates.\n\nRefer to get_template_context for more context.", "id": "f9444:c0:m19"} {"signature": "@classmethod@provide_sessiondef get_many(cls,execution_date,key=None,task_ids=None,dag_ids=None,include_prior_dates=False,limit=,session=None):", "body": "filters = []if key:filters.append(cls.key == key)if task_ids:filters.append(cls.task_id.in_(as_tuple(task_ids)))if dag_ids:filters.append(cls.dag_id.in_(as_tuple(dag_ids)))if include_prior_dates:filters.append(cls.execution_date <= execution_date)else:filters.append(cls.execution_date == execution_date)query = (session.query(cls).filter(and_(*filters)).order_by(cls.execution_date.desc(), cls.timestamp.desc()).limit(limit))results = query.all()return results", "docstring": "Retrieve an XCom value, optionally meeting certain criteria\nTODO: \"pickling\" has been deprecated and JSON is preferred.\n\"pickling\" will be removed in Airflow 2.0.", "id": "f9445:c0:m4"} {"signature": "@classmethod@provide_sessiondef get_one(cls,execution_date,key=None,task_id=None,dag_id=None,include_prior_dates=False,session=None):", "body": "filters = []if key:filters.append(cls.key == key)if task_id:filters.append(cls.task_id == task_id)if dag_id:filters.append(cls.dag_id == dag_id)if include_prior_dates:filters.append(cls.execution_date <= execution_date)else:filters.append(cls.execution_date == execution_date)query = (session.query(cls.value).filter(and_(*filters)).order_by(cls.execution_date.desc(), cls.timestamp.desc()))result = query.first()if result:enable_pickling = configuration.getboolean('', '')if enable_pickling:return pickle.loads(result.value)else:try:return json.loads(result.value.decode(''))except ValueError:log = LoggingMixin().loglog.error(\"\"\"\"\"\"\"\")raise", "docstring": "Retrieve an XCom value, optionally meeting certain criteria.\nTODO: \"pickling\" 
has been deprecated and JSON is preferred.\n\"pickling\" will be removed in Airflow 2.0.\n\n:return: XCom value", "id": "f9445:c0:m3"} {"signature": "@propertydef extra_dejson(self):", "body": "obj = {}if self.extra:try:obj = json.loads(self.extra)except Exception as e:self.log.exception(e)self.log.error(\"\", self.conn_id)return obj", "docstring": "Returns the extra property by deserializing json.", "id": "f9448:c0:m12"} {"signature": "@provide_sessiondef sync_to_db(self, owner=None, sync_time=None, session=None):", "body": "if owner is None:owner = self.ownerif sync_time is None:sync_time = timezone.utcnow()orm_dag = session.query(DagModel).filter(DagModel.dag_id == self.dag_id).first()if not orm_dag:orm_dag = DagModel(dag_id=self.dag_id)self.log.info(\"\", self.dag_id)orm_dag.fileloc = self.filelocorm_dag.is_subdag = self.is_subdagorm_dag.owners = ownerorm_dag.is_active = Trueorm_dag.last_scheduler_run = sync_timeorm_dag.default_view = self._default_vieworm_dag.description = self.descriptionorm_dag.schedule_interval = self.schedule_intervalsession.merge(orm_dag)session.commit()for subdag in self.subdags:subdag.sync_to_db(owner=owner, sync_time=sync_time, session=session)", "docstring": "Save attributes about this DAG to the DB. Note that this method\ncan be called for both DAGs and SubDAGs. A SubDag is actually a\nSubDagOperator.\n\n:param dag: the DAG object to save to the DB\n:type dag: airflow.models.DAG\n:param sync_time: The time that the DAG should be marked as sync'ed\n:type sync_time: datetime\n:return: None", "id": "f9451:c0:m65"} {"signature": "def get_active_runs(self):", "body": "runs = DagRun.find(dag_id=self.dag_id, state=State.RUNNING)active_dates = []for run in runs:active_dates.append(run.execution_date)return active_dates", "docstring": "Returns a list of dag run execution dates currently running\n\n:return: List of execution dates", "id": "f9451:c0:m38"} {"signature": "def add_tasks(self, tasks):", "body": "for task in tasks:self.add_task(task)", "docstring": "Add a list of tasks to the DAG\n\n:param tasks: a lit of tasks you want to add\n:type tasks: list of tasks", "id": "f9451:c0:m61"} {"signature": "def topological_sort(self):", "body": "graph_unsorted = OrderedDict((task.task_id, task) for task in self.tasks)graph_sorted = []if len(self.tasks) == :return tuple(graph_sorted)while graph_unsorted:acyclic = Falsefor node in list(graph_unsorted.values()):for edge in node.upstream_list:if edge.task_id in graph_unsorted:breakelse:acyclic = Truedel graph_unsorted[node.task_id]graph_sorted.append(node)if not acyclic:raise AirflowException(\"\".format(self.dag_id))return tuple(graph_sorted)", "docstring": "Sorts tasks in topographical order, such that a task comes after any of its\nupstream dependencies.\n\nHeavily inspired by:\nhttp://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/\n\n:return: list of tasks in topological order", "id": "f9451:c0:m49"} {"signature": "def run(self,start_date=None,end_date=None,mark_success=False,local=False,executor=None,donot_pickle=configuration.conf.getboolean('', ''),ignore_task_deps=False,ignore_first_depends_on_past=False,pool=None,delay_on_limit_secs=,verbose=False,conf=None,rerun_failed_tasks=False,run_backwards=False,):", "body": "from airflow.jobs import BackfillJobif not executor and local:executor = LocalExecutor()elif not executor:executor = get_default_executor()job = 
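The DAG topological_sort record above orders tasks so each comes after its upstream dependencies and raises when a cycle prevents progress. Below is a self-contained sketch of the same repeated-sweep approach over a plain dict of node -> upstream ids; the data structures are simplified stand-ins for DAG tasks, not the model's actual objects.

```python
def topological_sort(upstream_map):
    """Order node ids so every node appears after all of its upstream nodes.

    upstream_map: dict mapping node id -> iterable of upstream node ids.
    Raises ValueError if a cycle prevents a complete ordering.
    """
    unsorted_nodes = dict(upstream_map)
    ordered = []
    while unsorted_nodes:
        acyclic = False
        for node, upstream in list(unsorted_nodes.items()):
            # A node is ready once none of its upstream nodes remain unsorted.
            if all(dep not in unsorted_nodes for dep in upstream):
                acyclic = True
                del unsorted_nodes[node]
                ordered.append(node)
        if not acyclic:
            raise ValueError("A cyclic dependency occurred")
    return ordered


# extract -> transform -> load, plus an independent cleanup task
print(topological_sort({"extract": [], "transform": ["extract"],
                        "load": ["transform"], "cleanup": []}))
```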
BackfillJob(self,start_date=start_date,end_date=end_date,mark_success=mark_success,executor=executor,donot_pickle=donot_pickle,ignore_task_deps=ignore_task_deps,ignore_first_depends_on_past=ignore_first_depends_on_past,pool=pool,delay_on_limit_secs=delay_on_limit_secs,verbose=verbose,conf=conf,rerun_failed_tasks=rerun_failed_tasks,run_backwards=run_backwards,)job.run()", "docstring": "Runs the DAG.\n\n:param start_date: the start date of the range to run\n:type start_date: datetime.datetime\n:param end_date: the end date of the range to run\n:type end_date: datetime.datetime\n:param mark_success: True to mark jobs as succeeded without running them\n:type mark_success: bool\n:param local: True to run the tasks using the LocalExecutor\n:type local: bool\n:param executor: The executor instance to run the tasks\n:type executor: airflow.executor.BaseExecutor\n:param donot_pickle: True to avoid pickling DAG object and send to workers\n:type donot_pickle: bool\n:param ignore_task_deps: True to skip upstream tasks\n:type ignore_task_deps: bool\n:param ignore_first_depends_on_past: True to ignore depends_on_past\n dependencies for the first set of tasks only\n:type ignore_first_depends_on_past: bool\n:param pool: Resource pool to use\n:type pool: str\n:param delay_on_limit_secs: Time in seconds to wait before next attempt to run\n dag run when max_active_runs limit has been reached\n:type delay_on_limit_secs: float\n:param verbose: Make logging output more verbose\n:type verbose: bool\n:param conf: user defined dictionary passed from CLI\n:type conf: dict\n:param rerun_failed_tasks:\n:type: bool\n:param run_backwards:\n:type: bool", "id": "f9451:c0:m62"} {"signature": "@propertydef folder(self):", "body": "return os.path.dirname(self.full_filepath)", "docstring": "Folder location of where the dag object is instantiated", "id": "f9451:c0:m31"} {"signature": "@provide_sessiondef skip(self, dag_run, execution_date, tasks, session=None):", "body": "if not tasks:returntask_ids = [d.task_id for d in tasks]now = timezone.utcnow()if dag_run:session.query(TaskInstance).filter(TaskInstance.dag_id == dag_run.dag_id,TaskInstance.execution_date == dag_run.execution_date,TaskInstance.task_id.in_(task_ids)).update({TaskInstance.state: State.SKIPPED,TaskInstance.start_date: now,TaskInstance.end_date: now},synchronize_session=False)session.commit()else:assert execution_date is not None, \"\"self.log.warning(\"\")for task in tasks:ti = TaskInstance(task, execution_date=execution_date)ti.state = State.SKIPPEDti.start_date = nowti.end_date = nowsession.merge(ti)session.commit()", "docstring": "Sets tasks instances to skipped from the same dag run.\n\n:param dag_run: the DagRun for which to set the tasks to skipped\n:param execution_date: execution_date\n:param tasks: tasks to skip (not task_ids)\n:param session: db session to use", "id": "f9455:c0:m0"} {"signature": "def is_eligible_to_retry(self):", "body": "return self.task.retries and self.try_number <= self.max_tries", "docstring": "Is task instance is eligible for retry", "id": "f9457:c0:m34"} {"signature": "@provide_sessiondef refresh_from_db(self, session=None, lock_for_update=False):", "body": "TI = TaskInstanceqry = session.query(TI).filter(TI.dag_id == self.dag_id,TI.task_id == self.task_id,TI.execution_date == self.execution_date)if lock_for_update:ti = qry.with_for_update().first()else:ti = qry.first()if ti:self.state = ti.stateself.start_date = ti.start_dateself.end_date = ti.end_dateself.try_number = ti._try_numberself.max_tries = 
ti.max_triesself.hostname = ti.hostnameself.pid = ti.pidself.executor_config = ti.executor_configelse:self.state = None", "docstring": "Refreshes the task instance from the database based on the primary key\n\n:param lock_for_update: if True, indicates that the database should\n lock the TaskInstance (issuing a FOR UPDATE clause) until the\n session is committed.", "id": "f9457:c0:m13"} {"signature": "def ready_for_retry(self):", "body": "return (self.state == State.UP_FOR_RETRY andself.next_retry_datetime() < timezone.utcnow())", "docstring": "Checks on whether the task instance is in the right state and timeframe\nto be retried.", "id": "f9457:c0:m25"} {"signature": "@provide_sessiondef get_dagrun(self, session):", "body": "from airflow.models.dagrun import DagRun dr = session.query(DagRun).filter(DagRun.dag_id == self.dag_id,DagRun.execution_date == self.execution_date).first()return dr", "docstring": "Returns the DagRun for this TaskInstance\n\n:param session:\n:return: DagRun", "id": "f9457:c0:m27"} {"signature": "@provide_sessiondef clear_xcom_data(self, session=None):", "body": "session.query(XCom).filter(XCom.dag_id == self.dag_id,XCom.task_id == self.task_id,XCom.execution_date == self.execution_date).delete()session.commit()", "docstring": "Clears all XCom data from the database for the task instance", "id": "f9457:c0:m14"} {"signature": "@provide_sessiondef are_dependencies_met(self,dep_context=None,session=None,verbose=False):", "body": "dep_context = dep_context or DepContext()failed = Falseverbose_aware_logger = self.log.info if verbose else self.log.debugfor dep_status in self.get_failed_dep_statuses(dep_context=dep_context,session=session):failed = Trueverbose_aware_logger(\"\",self, dep_status.dep_name, dep_status.reason)if failed:return Falseverbose_aware_logger(\"\", self)return True", "docstring": "Returns whether or not all the conditions are met for this task instance to be run\ngiven the context for the dependencies (e.g. 
a task instance being force run from\nthe UI will ignore some dependencies).\n\n:param dep_context: The execution context that determines the dependencies that\n should be evaluated.\n:type dep_context: DepContext\n:param session: database session\n:type session: sqlalchemy.orm.session.Session\n:param verbose: whether log details on failed dependencies on\n info or debug log level\n:type verbose: bool", "id": "f9457:c0:m21"} {"signature": "@staticmethod@provide_sessiondef find(dag_id=None, run_id=None, execution_date=None,state=None, external_trigger=None, no_backfills=False,session=None):", "body": "DR = DagRunqry = session.query(DR)if dag_id:qry = qry.filter(DR.dag_id == dag_id)if run_id:qry = qry.filter(DR.run_id == run_id)if execution_date:if isinstance(execution_date, list):qry = qry.filter(DR.execution_date.in_(execution_date))else:qry = qry.filter(DR.execution_date == execution_date)if state:qry = qry.filter(DR.state == state)if external_trigger is not None:qry = qry.filter(DR.external_trigger == external_trigger)if no_backfills:from airflow.jobs import BackfillJobqry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + ''))dr = qry.order_by(DR.execution_date).all()return dr", "docstring": "Returns a set of dag runs for the given search criteria.\n\n:param dag_id: the dag_id to find dag runs for\n:type dag_id: int, list\n:param run_id: defines the the run id for this dag run\n:type run_id: str\n:param execution_date: the execution date\n:type execution_date: datetime.datetime\n:param state: the state of the dag run\n:type state: airflow.utils.state.State\n:param external_trigger: whether this dag run is externally triggered\n:type external_trigger: bool\n:param no_backfills: return no backfills (True), return all (False).\n Defaults to False\n:type no_backfills: bool\n:param session: database session\n:type session: sqlalchemy.orm.session.Session", "id": "f9460:c0:m6"} {"signature": "def get_dag(self):", "body": "if not self.dag:raise AirflowException(\"\".format(self))return self.dag", "docstring": "Returns the Dag associated with this DagRun.\n\n:return: DAG", "id": "f9460:c0:m9"} {"signature": "@staticmethoddef get_run(session, dag_id, execution_date):", "body": "qry = session.query(DagRun).filter(DagRun.dag_id == dag_id,DagRun.external_trigger == False, DagRun.execution_date == execution_date,)return qry.first()", "docstring": ":param dag_id: DAG ID\n:type dag_id: unicode\n:param execution_date: execution date\n:type execution_date: datetime\n:return: DagRun corresponding to the given dag_id and execution date\n if one exists. 
None otherwise.\n:rtype: airflow.models.DagRun", "id": "f9460:c0:m15"} {"signature": "@provide_sessiondef get_task_instance(self, task_id, session=None):", "body": "from airflow.models.taskinstance import TaskInstance TI = TaskInstanceti = session.query(TI).filter(TI.dag_id == self.dag_id,TI.execution_date == self.execution_date,TI.task_id == task_id).first()return ti", "docstring": "Returns the task instance specified by task_id for this dag run\n\n:param task_id: the task id", "id": "f9460:c0:m8"} {"signature": "@provide_sessiondef verify_integrity(self, session=None):", "body": "from airflow.models.taskinstance import TaskInstance dag = self.get_dag()tis = self.get_task_instances(session=session)task_ids = []for ti in tis:task_ids.append(ti.task_id)task = Nonetry:task = dag.get_task(ti.task_id)except AirflowException:if ti.state == State.REMOVED:pass elif self.state is not State.RUNNING and not dag.partial:self.log.warning(\"\"\"\".format(ti, dag))Stats.incr(\"\".format(dag.dag_id), , )ti.state = State.REMOVEDis_task_in_dag = task is not Noneshould_restore_task = is_task_in_dag and ti.state == State.REMOVEDif should_restore_task:self.log.info(\"\"\"\".format(ti, dag))Stats.incr(\"\".format(dag.dag_id), , )ti.state = State.NONEfor task in six.itervalues(dag.task_dict):if task.start_date > self.execution_date and not self.is_backfill:continueif task.task_id not in task_ids:Stats.incr(\"\".format(task.__class__.__name__),, )ti = TaskInstance(task, self.execution_date)session.add(ti)session.commit()", "docstring": "Verifies the DagRun by checking for removed tasks or tasks that are not in the\ndatabase yet. It will set state to removed or add the task if required.", "id": "f9460:c0:m14"} {"signature": "@provide_sessiondef _task_instances_for_dag_run(self, dag_run, session=None):", "body": "tasks_to_run = {}if dag_run is None:return tasks_to_runself.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)dag_run.refresh_from_db()make_transient(dag_run)for ti in dag_run.get_task_instances():if ti.state == State.NONE:ti.set_state(State.SCHEDULED, session=session)if ti.state != State.REMOVED:tasks_to_run[ti.key] = tireturn tasks_to_run", "docstring": "Returns a map of task instance key to task instance object for the tasks to\nrun in the given dag run.\n\n:param dag_run: the dag run to get the tasks from\n:type dag_run: airflow.models.DagRun\n:param session: the database session object\n:type session: sqlalchemy.orm.session.Session", "id": "f9464:c3:m4"} {"signature": "@propertydef start_time(self):", "body": "if self._start_time is None:raise AirflowException(\"\")return self._start_time", "docstring": ":return: when this started to process the file\n:rtype: datetime", "id": "f9464:c1:m9"} {"signature": "@provide_sessiondef _set_unfinished_dag_runs_to_failed(self, dag_runs, session=None):", "body": "for dag_run in dag_runs:dag_run.update_state()if dag_run.state not in State.finished():dag_run.set_state(State.FAILED)session.merge(dag_run)", "docstring": "Go through the dag_runs and update the state based on the task_instance state.\nThen set DAG runs that are not finished to failed.\n\n:param dag_runs: DAG runs\n:param session: session\n:return: None", "id": "f9464:c3:m9"} {"signature": "@propertydef result(self):", "body": "if not self.done:raise AirflowException(\"\")return self._result", "docstring": ":return: result of running SchedulerJob.process_file()\n:rtype: airflow.utils.dag_processing.SimpleDag", "id": "f9464:c1:m8"} {"signature": "@provide_sessiondef 
_process_backfill_task_instances(self,ti_status,executor,pickle_id,start_date=None, session=None):", "body": "executed_run_dates = []while ((len(ti_status.to_run) > or len(ti_status.running) > ) andlen(ti_status.deadlocked) == ):self.log.debug(\"\")ti_status.not_ready.clear()@provide_sessiondef _per_task_process(task, key, ti, session=None):ti.refresh_from_db()task = self.dag.get_task(ti.task_id)ti.task = taskignore_depends_on_past = (self.ignore_first_depends_on_past andti.execution_date == (start_date or ti.start_date))self.log.debug(\"\", ti, ti.state)if ti.state == State.SUCCESS:ti_status.succeeded.add(key)self.log.debug(\"\", ti)ti_status.to_run.pop(key)if key in ti_status.running:ti_status.running.pop(key)returnelif ti.state == State.SKIPPED:ti_status.skipped.add(key)self.log.debug(\"\", ti)ti_status.to_run.pop(key)if key in ti_status.running:ti_status.running.pop(key)returnelif ti.state == State.NONE:self.log.warning(\"\"\"\")ti.set_state(State.SCHEDULED, session=session)if self.rerun_failed_tasks:if ti.state in (State.FAILED, State.UPSTREAM_FAILED):self.log.error(\"\"\"\".format(ti=ti,state=ti.state))if key in ti_status.running:ti_status.running.pop(key)ti.set_state(State.SCHEDULED, session=session)else:if ti.state in (State.FAILED, State.UPSTREAM_FAILED):self.log.error(\"\"\"\".format(ti=ti,state=ti.state))ti_status.failed.add(key)ti_status.to_run.pop(key)if key in ti_status.running:ti_status.running.pop(key)returnbackfill_context = DepContext(deps=RUN_DEPS,ignore_depends_on_past=ignore_depends_on_past,ignore_task_deps=self.ignore_task_deps,flag_upstream_failed=True)if ti.are_dependencies_met(dep_context=backfill_context,session=session,verbose=self.verbose):ti.refresh_from_db(lock_for_update=True, session=session)if ti.state in (State.SCHEDULED, State.UP_FOR_RETRY, State.UP_FOR_RESCHEDULE):if executor.has_task(ti):self.log.debug(\"\"\"\",ti)else:self.log.debug('', ti)ti.state = State.QUEUEDti.queued_dttm = timezone.utcnow() if not ti.queued_dttm else ti.queued_dttmsession.merge(ti)cfg_path = Noneif executor.__class__ in (executors.LocalExecutor,executors.SequentialExecutor):cfg_path = tmp_configuration_copy()executor.queue_task_instance(ti,mark_success=self.mark_success,pickle_id=pickle_id,ignore_task_deps=self.ignore_task_deps,ignore_depends_on_past=ignore_depends_on_past,pool=self.pool,cfg_path=cfg_path)ti_status.running[key] = titi_status.to_run.pop(key)session.commit()returnif ti.state == State.UPSTREAM_FAILED:self.log.error(\"\", ti)ti_status.failed.add(key)ti_status.to_run.pop(key)if key in ti_status.running:ti_status.running.pop(key)returnif ti.state == State.UP_FOR_RETRY:self.log.debug(\"\"\"\", ti)if key in ti_status.running:ti_status.running.pop(key)ti_status.to_run[key] = tireturnif ti.state == State.UP_FOR_RESCHEDULE:self.log.debug(\"\"\"\", ti)if key in ti_status.running:ti_status.running.pop(key)ti_status.to_run[key] = tireturnself.log.debug('', ti)ti_status.not_ready.add(key)non_pool_slots = conf.getint('', '')try:for task in self.dag.topological_sort():for key, ti in list(ti_status.to_run.items()):if task.task_id != ti.task_id:continueif task.pool:pool = session.query(models.Pool).filter(models.Pool.pool == task.pool).first()if not pool:raise PoolNotFound(''.format(task.pool))open_slots = pool.open_slots(session=session)if open_slots <= :raise NoAvailablePoolSlot(\"\"\"\".format(open_slots, task.pool))else:if non_pool_slots <= :raise NoAvailablePoolSlot(\"\"\"\")non_pool_slots -= num_running_tasks = DAG.get_num_task_instances(self.dag_id,states=(State.QUEUED, 
State.RUNNING))if num_running_tasks >= self.dag.concurrency:raise DagConcurrencyLimitReached(\"\"\"\")_per_task_process(task, key, ti)except (NoAvailablePoolSlot, DagConcurrencyLimitReached) as e:self.log.debug(e)self.heartbeat()executor.heartbeat()if (ti_status.not_ready andti_status.not_ready == set(ti_status.to_run) andlen(ti_status.running) == ):self.log.warning(\"\",ti_status.to_run.values())ti_status.deadlocked.update(ti_status.to_run.values())ti_status.to_run.clear()self._manage_executor_state(ti_status.running)self._update_counters(ti_status=ti_status)_dag_runs = ti_status.active_runs[:]for run in _dag_runs:run.update_state(session=session)if run.state in State.finished():ti_status.finished_runs += ti_status.active_runs.remove(run)executed_run_dates.append(run.execution_date)self._log_progress(ti_status)return executed_run_dates", "docstring": "Process a set of task instances from a set of dag runs. Special handling is done\nto account for different task instance states that could be present when running\nthem in a backfill process.\n\n:param ti_status: the internal status of the job\n:type ti_status: BackfillJob._DagRunTaskStatus\n:param executor: the executor to run the task instances\n:type executor: BaseExecutor\n:param pickle_id: the pickle_id if dag is pickled, None otherwise\n:type pickle_id: int\n:param start_date: the start date of the backfill job\n:type start_date: datetime.datetime\n:param session: the current session object\n:type session: sqlalchemy.orm.session.Session\n:return: the list of execution_dates for the finished dag runs\n:rtype: list", "id": "f9464:c3:m6"} {"signature": "@provide_sessiondef _find_executable_task_instances(self, simple_dag_bag, states, session=None):", "body": "executable_tis = []TI = models.TaskInstanceDR = models.DagRunDM = models.DagModelti_query = (session.query(TI).filter(TI.dag_id.in_(simple_dag_bag.dag_ids)).outerjoin(DR,and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)).filter(or_(DR.run_id == None, not_(DR.run_id.like(BackfillJob.ID_PREFIX + '')))).outerjoin(DM, DM.dag_id == TI.dag_id).filter(or_(DM.dag_id == None, not_(DM.is_paused))))if None in states:ti_query = ti_query.filter(or_(TI.state == None, TI.state.in_(states)) )else:ti_query = ti_query.filter(TI.state.in_(states))task_instances_to_examine = ti_query.all()if len(task_instances_to_examine) == :self.log.debug(\"\")return executable_tistask_instance_str = \"\".join([repr(x) for x in task_instances_to_examine])self.log.info(\"\", len(task_instances_to_examine),task_instance_str)pools = {p.pool: p for p in session.query(models.Pool).all()}pool_to_task_instances = defaultdict(list)for task_instance in task_instances_to_examine:pool_to_task_instances[task_instance.pool].append(task_instance)states_to_count_as_running = [State.RUNNING, State.QUEUED]dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(states=states_to_count_as_running, session=session)for pool, task_instances in pool_to_task_instances.items():pool_name = poolif not pool:open_slots = models.Pool.default_pool_open_slots()pool_name = models.Pool.default_pool_nameelse:if pool not in pools:self.log.warning(\"\",pool)open_slots = else:open_slots = pools[pool].open_slots(session=session)num_ready = len(task_instances)self.log.info(\"\"\"\",pool, open_slots, num_ready)priority_sorted_task_instances = sorted(task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))num_starving_tasks = for current_index, task_instance in enumerate(priority_sorted_task_instances):if 
open_slots <= :self.log.info(\"\",open_slots, pool)num_starving_tasks = len(priority_sorted_task_instances) - current_indexbreakdag_id = task_instance.dag_idsimple_dag = simple_dag_bag.get_dag(dag_id)current_dag_concurrency = dag_concurrency_map[dag_id]dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrencyself.log.info(\"\",dag_id, current_dag_concurrency, dag_concurrency_limit)if current_dag_concurrency >= dag_concurrency_limit:self.log.info(\"\"\"\",task_instance, dag_id, dag_concurrency_limit)continuetask_concurrency_limit = simple_dag.get_task_special_arg(task_instance.task_id,'')if task_concurrency_limit is not None:current_task_concurrency = task_concurrency_map[(task_instance.dag_id, task_instance.task_id)]if current_task_concurrency >= task_concurrency_limit:self.log.info(\"\"\"\", task_instance)continueif self.executor.has_task(task_instance):self.log.debug(\"\",task_instance.key)continueexecutable_tis.append(task_instance)open_slots -= dag_concurrency_map[dag_id] += task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += Stats.gauge(''.format(pool_name=pool_name),num_starving_tasks)task_instance_str = \"\".join([repr(x) for x in executable_tis])self.log.info(\"\", task_instance_str)for ti in executable_tis:copy_dag_id = ti.dag_idcopy_execution_date = ti.execution_datecopy_task_id = ti.task_idmake_transient(ti)ti.dag_id = copy_dag_idti.execution_date = copy_execution_dateti.task_id = copy_task_idreturn executable_tis", "docstring": "Finds TIs that are ready for execution with respect to pool limits,\ndag concurrency, executor state, and priority.\n\n:param simple_dag_bag: TaskInstances associated with DAGs in the\n simple_dag_bag will be fetched from the DB and executed\n:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag\n:param executor: the executor that runs task instances\n:type executor: BaseExecutor\n:param states: Execute TaskInstances in these states\n:type states: tuple[airflow.utils.state.State]\n:return: list[airflow.models.TaskInstance]", "id": "f9464:c2:m8"} {"signature": "def heartbeat(self):", "body": "try:with create_session() as session:job = session.query(BaseJob).filter_by(id=self.id).one()make_transient(job)session.commit()if job.state == State.SHUTDOWN:self.kill()is_unit_test = conf.getboolean('', '')if not is_unit_test:sleep_for = if job.latest_heartbeat:seconds_remaining = self.heartrate -(timezone.utcnow() - job.latest_heartbeat).total_seconds()sleep_for = max(, seconds_remaining)sleep(sleep_for)with create_session() as session:job = session.query(BaseJob).filter(BaseJob.id == self.id).first()job.latest_heartbeat = timezone.utcnow()session.merge(job)session.commit()self.heartbeat_callback(session=session)self.log.debug('')except OperationalError as e:self.log.error(\"\", str(e))", "docstring": "Heartbeats update the job's entry in the database with a timestamp\nfor the latest_heartbeat and allows for the job to be killed\nexternally. This allows at the system level to monitor what is\nactually active.\n\nFor instance, an old heartbeat for SchedulerJob would mean something\nis wrong.\n\nThis also allows for any job to be killed externally, regardless\nof who is running it or on which machine it is running.\n\nNote that if your heartbeat is set to 60 seconds and you call this\nmethod after 10 seconds of processing since the last heartbeat, it\nwill sleep 50 seconds to complete the 60 seconds and keep a steady\nheart rate. 
If you go over 60 seconds before calling it, it won't\nsleep at all.", "id": "f9464:c0:m5"} {"signature": "@propertydef done(self):", "body": "if self._process is None:raise AirflowException(\"\")if self._done:return Trueif self._result_queue and not self._result_queue.empty():self._result = self._result_queue.get_nowait()self._done = Trueself.log.debug(\"\", self._process)self._process.join()return Trueif self._result_queue and not self._process.is_alive():self._done = Trueif not self._result_queue.empty():self._result = self._result_queue.get_nowait()self.log.debug(\"\", self._process)self._process.join()return Truereturn False", "docstring": "Check if the process launched to process this file is done.\n\n:return: whether the process is finished running\n:rtype: bool", "id": "f9464:c1:m7"} {"signature": "@provide_sessiondef _execute(self, session=None):", "body": "ti_status = BackfillJob._DagRunTaskStatus()start_date = self.bf_start_daterun_dates = self.dag.get_run_dates(start_date=start_date,end_date=self.bf_end_date)if self.run_backwards:tasks_that_depend_on_past = [t.task_id for t in self.dag.task_dict.values() if t.depends_on_past]if tasks_that_depend_on_past:raise AirflowException(''.format(\"\".join(tasks_that_depend_on_past)))run_dates = run_dates[::-]if len(run_dates) == :self.log.info(\"\")returnpickle_id = Noneif not self.donot_pickle and self.executor.__class__ not in (executors.LocalExecutor, executors.SequentialExecutor):pickle = DagPickle(self.dag)session.add(pickle)session.commit()pickle_id = pickle.idexecutor = self.executorexecutor.start()ti_status.total_runs = len(run_dates) try:remaining_dates = ti_status.total_runswhile remaining_dates > :dates_to_process = [run_date for run_date in run_datesif run_date not in ti_status.executed_dag_run_dates]self._execute_for_run_dates(run_dates=dates_to_process,ti_status=ti_status,executor=executor,pickle_id=pickle_id,start_date=start_date,session=session)remaining_dates = (ti_status.total_runs - len(ti_status.executed_dag_run_dates))err = self._collect_errors(ti_status=ti_status, session=session)if err:raise AirflowException(err)if remaining_dates > :self.log.info(\"\"\"\",self.dag_id)time.sleep(self.delay_on_limit_secs)except (KeyboardInterrupt, SystemExit):self.log.warning(\"\")self._set_unfinished_dag_runs_to_failed(ti_status.active_runs)finally:session.commit()executor.end()self.log.info(\"\")", "docstring": "Initializes all components required to run a dag for a specified date range and\ncalls helper method to execute the tasks.", "id": "f9464:c3:m10"} {"signature": "def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):", "body": "self._file_path = file_pathself._result_queue = multiprocessing.Queue()self._process = Noneself._dag_id_white_list = dag_id_white_listself._pickle_dags = pickle_dagsself._zombies = zombiesself._result = Noneself._done = Falseself._start_time = Noneself._instance_id = DagFileProcessor.class_creation_counterDagFileProcessor.class_creation_counter += ", "docstring": ":param file_path: a Python file containing Airflow DAG definitions\n:type file_path: unicode\n:param pickle_dags: whether to serialize the DAG objects to the DB\n:type pickle_dags: bool\n:param dag_id_whitelist: If specified, only look at these DAG ID's\n:type dag_id_whitelist: list[unicode]\n:param zombies: zombie task instances to kill\n:type zombies: list[airflow.utils.dag_processing.SimpleTaskInstance]", "id": "f9464:c1:m0"} {"signature": "@staticmethoddef update_import_errors(session, dagbag):", "body": "for 
dagbag_file in dagbag.file_last_changed:session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()for filename, stacktrace in six.iteritems(dagbag.import_errors):session.add(errors.ImportError(filename=filename,stacktrace=stacktrace))session.commit()", "docstring": "For the DAGs in the given DagBag, record any associated import errors and clears\nerrors for files that no longer have them. These are usually displayed through the\nAirflow UI so that users know that there are issues parsing DAGs.\n\n:param session: session for ORM operations\n:type session: sqlalchemy.orm.session.Session\n:param dagbag: DagBag containing DAGs with import errors\n:type dagbag: airflow.models.DagBag", "id": "f9464:c2:m3"} {"signature": "def send_lineage(self,operator=None, inlets=None, outlets=None, context=None):", "body": "raise NotImplementedError()", "docstring": "Sends lineage metadata to a backend\n:param operator: the operator executing a transformation on the inlets and outlets\n:param inlets: the inlets to this operator\n:param outlets: the outlets from this operator\n:param context: the current context of the task instance", "id": "f9468:c0:m0"} {"signature": "def parse(string, timezone=None):", "body": "return pendulum.parse(string, tz=timezone or TIMEZONE)", "docstring": "Parse a time string and return an aware datetime\n:param string: time string", "id": "f9470:m8"} {"signature": "def make_naive(value, timezone=None):", "body": "if timezone is None:timezone = TIMEZONEif is_naive(value):raise ValueError(\"\")o = value.astimezone(timezone)naive = dt.datetime(o.year,o.month,o.day,o.hour,o.minute,o.second,o.microsecond)return naive", "docstring": "Make an aware datetime.datetime naive in a given time zone.\n\n:param value: datetime\n:param timezone: timezone\n:return: naive datetime", "id": "f9470:m6"} {"signature": "def datetime(*args, **kwargs):", "body": "if '' not in kwargs:kwargs[''] = TIMEZONEreturn dt.datetime(*args, **kwargs)", "docstring": "Wrapper around datetime.datetime that adds settings.TIMEZONE if tzinfo not specified\n\n:return: datetime.datetime", "id": "f9470:m7"} {"signature": "def utc_epoch():", "body": "d = dt.datetime(, , )d = d.replace(tzinfo=utc)return d", "docstring": "Gets the epoch in the users timezone\n:return:", "id": "f9470:m3"} {"signature": "def context_to_airflow_vars(context, in_env_var_format=False):", "body": "params = dict()if in_env_var_format:name_format = ''else:name_format = ''task_instance = context.get('')if task_instance and task_instance.dag_id:params[AIRFLOW_VAR_NAME_FORMAT_MAPPING[''][name_format]] = task_instance.dag_idif task_instance and task_instance.task_id:params[AIRFLOW_VAR_NAME_FORMAT_MAPPING[''][name_format]] = task_instance.task_idif task_instance and task_instance.execution_date:params[AIRFLOW_VAR_NAME_FORMAT_MAPPING[''][name_format]] = task_instance.execution_date.isoformat()dag_run = context.get('')if dag_run and dag_run.run_id:params[AIRFLOW_VAR_NAME_FORMAT_MAPPING[''][name_format]] = dag_run.run_idreturn params", "docstring": "Given a context, this function provides a dictionary of values that can be used to\nexternally reconstruct relations between dags, dag_runs, tasks and task_instances.\nDefault to abc.def.ghi format and can be made to ABC_DEF_GHI format if\nin_env_var_format is set to True.\n\n:param context: The context for the task_instance of interest.\n:type context: dict\n:param in_env_var_format: If returned vars should be in ABC_DEF_GHI format.\n:type in_env_var_format: bool\n:return: 
task_instance context as dict.", "id": "f9471:m0"} {"signature": "def close(self):", "body": "if self.closed:returnsuper().close()if not self.upload_on_close:returnlocal_loc = os.path.join(self.local_base, self.log_relative_path)remote_loc = os.path.join(self.remote_base, self.log_relative_path)if os.path.exists(local_loc):with open(local_loc, '') as logfile:log = logfile.read()self.s3_write(log, remote_loc)self.closed = True", "docstring": "Close and upload local log file to remote storage S3.", "id": "f9473:c0:m3"} {"signature": "def _read(self, ti, try_number, metadata=None):", "body": "log_relative_path = self._render_filename(ti, try_number)remote_loc = os.path.join(self.remote_base, log_relative_path)if self.s3_log_exists(remote_loc):remote_log = self.s3_read(remote_loc, return_error=True)log = ''.format(remote_loc, remote_log)return log, {'': True}else:return super()._read(ti, try_number)", "docstring": "Read logs of given task instance and try_number from S3 remote storage.\nIf failed, read the log from task instance host machine.\n:param ti: task instance object\n:param try_number: task instance try_number to read logs from\n:param metadata: log metadata,\n can be used for streaming log reading and auto-tailing.", "id": "f9473:c0:m4"} {"signature": "def _read(self, ti, try_number, metadata=None):", "body": "log_relative_path = self._render_filename(ti, try_number)remote_loc = os.path.join(self.remote_base, log_relative_path)if self.wasb_log_exists(remote_loc):remote_log = self.wasb_read(remote_loc, return_error=True)log = ''.format(remote_loc, remote_log)return log, {'': True}else:return super()._read(ti, try_number)", "docstring": "Read logs of given task instance and try_number from Wasb remote storage.\nIf failed, read the log from task instance host machine.\n:param ti: task instance object\n:param try_number: task instance try_number to read logs from\n:param metadata: log metadata,\n can be used for streaming log reading and auto-tailing.", "id": "f9474:c0:m4"} {"signature": "def wasb_log_exists(self, remote_log_location):", "body": "try:return self.hook.check_for_blob(self.wasb_container, remote_log_location)except Exception:passreturn False", "docstring": "Check if remote_log_location exists in remote storage\n:param remote_log_location: log's location in remote storage\n:return: True if location exists else False", "id": "f9474:c0:m5"} {"signature": "def wasb_write(self, log, remote_log_location, append=True):", "body": "if append and self.wasb_log_exists(remote_log_location):old_log = self.wasb_read(remote_log_location)log = ''.join([old_log, log]) if old_log else logtry:self.hook.load_string(log,self.wasb_container,remote_log_location,)except AzureHttpError:self.log.exception('',remote_log_location)", "docstring": "Writes the log to the remote_log_location. Fails silently if no hook\nwas created.\n:param log: the log to write to the remote_log_location\n:type log: str\n:param remote_log_location: the log's location in remote storage\n:type remote_log_location: str (path)\n:param append: if False, any existing log file is overwritten. 
If True,\n the new log is appended to any existing logs.\n:type append: bool", "id": "f9474:c0:m7"} {"signature": "def flush(self):", "body": "if len(self._buffer) > :self.logger.log(self.level, self._buffer)self._buffer = str()", "docstring": "Ensure all logging output has been flushed", "id": "f9475:c1:m3"} {"signature": "@propertydef closed(self):", "body": "return False", "docstring": "Returns False to indicate that the stream is not closed (as it will be\nopen for the duration of Airflow's lifecycle).\n\nFor compatibility with the io.IOBase interface.", "id": "f9475:c1:m1"} {"signature": "def _read(self, ti, try_number, metadata=None):", "body": "log_relative_path = self._render_filename(ti, try_number)remote_loc = os.path.join(self.remote_base, log_relative_path)try:remote_log = self.gcs_read(remote_loc)log = ''.format(remote_loc, remote_log)return log, {'': True}except Exception as e:log = ''.format(remote_loc, str(e))self.log.error(log)local_log, metadata = super()._read(ti, try_number)log += local_logreturn log, metadata", "docstring": "Read logs of given task instance and try_number from GCS.\nIf failed, read the log from task instance host machine.\n:param ti: task instance object\n:param try_number: task instance try_number to read logs from\n:param metadata: log metadata,\n can be used for streaming log reading and auto-tailing.", "id": "f9479:c0:m4"} {"signature": "def _build_metrics(func_name, namespace):", "body": "metrics = {'': func_name, '': datetime.utcnow(),'': ''.format(list(sys.argv)), '': getpass.getuser()}assert isinstance(namespace, Namespace)tmp_dic = vars(namespace)metrics[''] = tmp_dic.get('')metrics[''] = tmp_dic.get('')metrics[''] = tmp_dic.get('')metrics[''] = socket.gethostname()extra = json.dumps(dict((k, metrics[k]) for k in ('', '')))log = Log(event=''.format(func_name),task_instance=None,owner=metrics[''],extra=extra,task_id=metrics.get(''),dag_id=metrics.get(''),execution_date=metrics.get(''))metrics[''] = logreturn metrics", "docstring": "Builds a metrics dict from function args.\nIt assumes that the function arguments come from an airflow.bin.cli module function\nand include a Namespace instance that optionally contains \"dag_id\", \"task_id\",\nand \"execution_date\".\n\n:param func_name: name of function\n:param namespace: Namespace instance from argparse\n:return: dict with metrics", "id": "f9481:m1"} {"signature": "def action_logging(f):", "body": "@functools.wraps(f)def wrapper(*args, **kwargs):\"\"\"\"\"\"assert argsassert isinstance(args[], Namespace),\"\"\"\".format(args[])metrics = _build_metrics(f.__name__, args[])cli_action_loggers.on_pre_execution(**metrics)try:return f(*args, **kwargs)except Exception as e:metrics[''] = eraisefinally:metrics[''] = datetime.utcnow()cli_action_loggers.on_post_execution(**metrics)return wrapper", "docstring": "Decorates a function to execute the function and submit action logging at the same time,\nin a CLI context. 
It will call action logger callbacks twice,\none for pre-execution and the other one for post-execution.\n\nAction logger will be called with below keyword parameters:\n sub_command : name of sub-command\n start_datetime : start datetime instance by utc\n end_datetime : end datetime instance by utc\n full_command : full command line arguments\n user : current user\n log : airflow.models.log.Log ORM instance\n dag_id : dag id (optional)\n task_id : task_id (optional)\n execution_date : execution date (optional)\n error : exception instance if there's an exception\n\n:param f: function instance\n:return: wrapped function", "id": "f9481:m0"} {"signature": "def days_ago(n, hour=, minute=, second=, microsecond=):", "body": "today = timezone.utcnow().replace(hour=hour,minute=minute,second=second,microsecond=microsecond)return today - timedelta(days=n)", "docstring": "Get a datetime object representing `n` days ago. By default the time is\nset to midnight.", "id": "f9485:m4"} {"signature": "def date_range(start_date, end_date=None, num=None, delta=None):", "body": "if not delta:return []if end_date and start_date > end_date:raise Exception(\"\")if end_date and num:raise Exception(\"\")if not end_date and not num:end_date = timezone.utcnow()delta_iscron = Falsetz = start_date.tzinfoif isinstance(delta, six.string_types):delta_iscron = Truestart_date = timezone.make_naive(start_date, tz)cron = croniter(delta, start_date)elif isinstance(delta, timedelta):delta = abs(delta)dates = []if end_date:if timezone.is_naive(start_date):end_date = timezone.make_naive(end_date, tz)while start_date <= end_date:if timezone.is_naive(start_date):dates.append(timezone.make_aware(start_date, tz))else:dates.append(start_date)if delta_iscron:start_date = cron.get_next(datetime)else:start_date += deltaelse:for _ in range(abs(num)):if timezone.is_naive(start_date):dates.append(timezone.make_aware(start_date, tz))else:dates.append(start_date)if delta_iscron:if num > :start_date = cron.get_next(datetime)else:start_date = cron.get_prev(datetime)else:if num > :start_date += deltaelse:start_date -= deltareturn sorted(dates)", "docstring": "Get a set of dates as a list based on a start, end and delta; delta\ncan be something that can be added to `datetime.datetime`\nor a cron expression as a `str`\n\n:Example::\n\n date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))\n [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),\n datetime.datetime(2016, 1, 3, 0, 0)]\n date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')\n [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),\n datetime.datetime(2016, 1, 3, 0, 0)]\n date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta=\"0 0 0 * *\")\n [datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),\n datetime.datetime(2016, 3, 1, 0, 0)]\n\n:param start_date: anchor date to start the series from\n:type start_date: datetime.datetime\n:param end_date: right boundary for the date range\n:type end_date: datetime.datetime\n:param num: alternatively to end_date, you can specify the number of\n entries you want in the range. 
This number can be negative,\n output will always be sorted regardless\n:type num: int", "id": "f9485:m0"} {"signature": "def send_email(to, subject, html_content,files=None, dryrun=False, cc=None, bcc=None,mime_subtype='', mime_charset='', **kwargs):", "body": "path, attr = configuration.conf.get('', '').rsplit('', )module = importlib.import_module(path)backend = getattr(module, attr)to = get_email_address_list(to)to = \"\".join(to)return backend(to, subject, html_content, files=files,dryrun=dryrun, cc=cc, bcc=bcc,mime_subtype=mime_subtype, mime_charset=mime_charset, **kwargs)", "docstring": "Send email using backend specified in EMAIL_BACKEND.", "id": "f9486:m0"} {"signature": "def tmp_configuration_copy(chmod=):", "body": "cfg_dict = conf.as_dict(display_sensitive=True, raw=True)temp_fd, cfg_path = mkstemp()with os.fdopen(temp_fd, '') as temp_file:if chmod is not None:os.fchmod(temp_fd, chmod)json.dump(cfg_dict, temp_file)return cfg_path", "docstring": "Returns a path for a temporary file including a full copy of the configuration\nsettings.\n:return: a path to a temporary file", "id": "f9489:m0"} {"signature": "def chain(*tasks):", "body": "for up_task, down_task in zip(tasks[:-], tasks[:]):up_task.set_downstream(down_task)", "docstring": "Given a number of tasks, builds a dependency chain.\n\nchain(task_1, task_2, task_3, task_4)\n\nis equivalent to\n\ntask_1.set_downstream(task_2)\ntask_2.set_downstream(task_3)\ntask_3.set_downstream(task_4)", "id": "f9491:m9"} {"signature": "def pprinttable(rows):", "body": "if not rows:returnif hasattr(rows[], ''): headers = rows[]._fieldselse:headers = [\"\".format(i) for i in range(len(rows[]))]lens = [len(s) for s in headers]for row in rows:for i in range(len(rows[])):slenght = len(\"\".format(row[i]))if slenght > lens[i]:lens[i] = slenghtformats = []hformats = []for i in range(len(rows[])):if isinstance(rows[][i], int):formats.append(\"\" % lens[i])else:formats.append(\"\" % lens[i])hformats.append(\"\" % lens[i])pattern = \"\".join(formats)hpattern = \"\".join(hformats)separator = \"\".join(['' * n for n in lens])s = \"\"s += separator + ''s += (hpattern % tuple(headers)) + ''s += separator + ''def f(t):return \"\".format(t) if isinstance(t, basestring) else tfor line in rows:s += pattern % tuple(f(t) for t in line) + ''s += separator + ''return s", "docstring": "Returns a pretty ascii table from tuples\n\n If namedtuple are used, the table will have headers", "id": "f9491:m11"} {"signature": "@propertydef concurrency(self):", "body": "return self._concurrency", "docstring": ":return: maximum number of tasks that can run simultaneously from this DAG\n:rtype: int", "id": "f9493:c0:m4"} {"signature": "def get_all_pids(self):", "body": "return [x.pid for x in self._processors.values()]", "docstring": ":return: a list of the PIDs for the processors that are running\n:rtype: List[int]", "id": "f9493:c6:m11"} {"signature": "@propertydef dag_ids(self):", "body": "return self.dag_id_to_simple_dag.keys()", "docstring": ":return: IDs of all the DAGs in this\n:rtype: list[unicode]", "id": "f9493:c2:m1"} {"signature": "def _refresh_dag_dir(self):", "body": "elapsed_time_since_refresh = (timezone.utcnow() -self.last_dag_dir_refresh_time).total_seconds()if elapsed_time_since_refresh > self.dag_dir_list_interval:self.log.info(\"\", self._dag_directory)self._file_paths = list_py_file_paths(self._dag_directory)self.last_dag_dir_refresh_time = timezone.utcnow()self.log.info(\"\", len(self._file_paths), 
self._dag_directory)self.set_file_paths(self._file_paths)try:self.log.debug(\"\")self.clear_nonexistent_import_errors()except Exception:self.log.exception(\"\")", "docstring": "Refresh file paths from dag dir if we haven't done it for too long.", "id": "f9493:c6:m5"} {"signature": "def processing_count(self):", "body": "return len(self._processors)", "docstring": ":return: the number of files currently being processed\n:rtype: int", "id": "f9493:c6:m17"} {"signature": "@property@abstractmethoddef file_path(self):", "body": "raise NotImplementedError()", "docstring": ":return: the path to the file that this is processing\n:rtype: unicode", "id": "f9493:c3:m7"} {"signature": "def harvest_simple_dags(self):", "body": "self._sync_metadata()self._heartbeat_manager()simple_dags = []if sys.platform == \"\":qsize = self._result_countelse:qsize = self._result_queue.qsize()for _ in range(qsize):simple_dags.append(self._result_queue.get())self._result_count = return simple_dags", "docstring": "Harvest DAG parsing results from result queue and sync metadata from stat queue.\n:return: List of parsing result in SimpleDag format.", "id": "f9493:c5:m5"} {"signature": "@abstractmethoddef terminate(self, sigkill=False):", "body": "raise NotImplementedError()", "docstring": "Terminate (and then kill) the process launched to process the file", "id": "f9493:c3:m1"} {"signature": "@provide_sessiondef clear_nonexistent_import_errors(self, session):", "body": "query = session.query(errors.ImportError)if self._file_paths:query = query.filter(~errors.ImportError.filename.in_(self._file_paths))query.delete(synchronize_session='')session.commit()", "docstring": "Clears import errors for files that no longer exist.\n\n:param session: session for ORM operations\n:type session: sqlalchemy.orm.session.Session", "id": "f9493:c6:m7"} {"signature": "def __init__(self,dag_directory,file_paths,max_runs,processor_factory,signal_conn,stat_queue,result_queue,async_mode=True):", "body": "self._file_paths = file_pathsself._file_path_queue = []self._dag_directory = dag_directoryself._max_runs = max_runsself._processor_factory = processor_factoryself._signal_conn = signal_connself._stat_queue = stat_queueself._result_queue = result_queueself._async_mode = async_modeself._parallelism = conf.getint('', '')if '' in conf.get('', '') and self._parallelism > :self.log.error(\"\"\"\")self._parallelism = self._file_process_interval = conf.getint('','')self.print_stats_interval = conf.getint('','')self._zombie_threshold_secs = (conf.getint('', ''))self._processors = {}self._last_runtime = {}self._last_finish_time = {}self._last_zombie_query_time = timezone.utcnow()self.last_dag_dir_refresh_time = timezone.utcnow()self.last_stat_print_time = timezone.datetime(, , )self._zombie_query_interval = self._run_count = defaultdict(int)self._heart_beat_key = ''self.dag_dir_list_interval = conf.getint('','')self._log = logging.getLogger('')signal.signal(signal.SIGINT, self._exit_gracefully)signal.signal(signal.SIGTERM, self._exit_gracefully)", "docstring": ":param dag_directory: Directory where DAG definitions are kept. All\n files in file_paths should be under this directory\n:type dag_directory: unicode\n:param file_paths: list of file paths that contain DAG definitions\n:type file_paths: list[unicode]\n:param max_runs: The number of times to parse and schedule each file. -1\n for unlimited.\n:type max_runs: int\n:param processor_factory: function that creates processors for DAG\n definition files. 
Arguments are (dag_definition_path)\n:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)\n:param signal_conn: connection to communicate signal with processor agent.\n:type signal_conn: airflow.models.connection.Connection\n:param stat_queue: the queue to use for passing back parsing stat to agent.\n:type stat_queue: multiprocessing.Queue\n:param result_queue: the queue to use for passing back the result to agent.\n:type result_queue: multiprocessing.Queue\n:param async_mode: whether to start the manager in async mode\n:type async_mode: bool", "id": "f9493:c6:m0"} {"signature": "def get_last_finish_time(self, file_path):", "body": "return self._last_finish_time.get(file_path)", "docstring": ":param file_path: the path to the file that was processed\n:type file_path: unicode\n:return: the finish time of the process of the last run, or None if the\n file was never processed.\n:rtype: datetime", "id": "f9493:c6:m14"} {"signature": "def correct_maybe_zipped(fileloc):", "body": "_, archive, filename = re.search(r''.format(re.escape(os.sep)), fileloc).groups()if archive and zipfile.is_zipfile(archive):return archiveelse:return fileloc", "docstring": "If the path contains a folder with a .zip suffix, then\nthe folder is treated as a zip archive and path to zip is returned.", "id": "f9493:m0"} {"signature": "def terminate(self):", "body": "for processor in self._processors.values():processor.terminate()", "docstring": "Stops all running processors\n:return: None", "id": "f9493:c6:m22"} {"signature": "def default_action_log(log, **_):", "body": "with create_session() as session:session.add(log)", "docstring": "A default action logger callback that behave same as www.utils.action_logging\nwhich uses global session and pushes log ORM object.\n:param log: An log ORM instance\n:param **_: other keyword arguments that is not being used by this function\n:return: None", "id": "f9494:m4"} {"signature": "def register_post_exec_callback(action_logger):", "body": "logging.debug(\"\", action_logger)__post_exec_callbacks.append(action_logger)", "docstring": "Registers more action_logger function callback for post-execution.\nThis function callback is expected to be called with keyword args.\nFor more about the arguments that is being passed to the callback,\nrefer to airflow.utils.cli.action_logging()\n:param action_logger: An action logger function\n:return: None", "id": "f9494:m1"} {"signature": "def get_first(self, hql, parameters=None):", "body": "try:return super().get_first(self._strip_sql(hql), parameters)except DatabaseError as e:raise PrestoException(self._get_pretty_exception_message(e))", "docstring": "Returns only the first row, regardless of how many rows the query\nreturns.", "id": "f9501:c1:m4"} {"signature": "def run(self, hql, parameters=None):", "body": "return super().run(self._strip_sql(hql), parameters)", "docstring": "Execute the statement against Presto. 
Can be used to create views.", "id": "f9501:c1:m6"} {"signature": "def set_autocommit(self, conn, autocommit):", "body": "if not self.supports_autocommit and autocommit:self.log.warn((\"\"\"\"),getattr(self, self.conn_name_attr))conn.autocommit = autocommit", "docstring": "Sets the autocommit flag on the connection", "id": "f9502:c0:m8"} {"signature": "def run_and_check(self, session, prepped_request, extra_options):", "body": "extra_options = extra_options or {}try:response = session.send(prepped_request,stream=extra_options.get(\"\", False),verify=extra_options.get(\"\", True),proxies=extra_options.get(\"\", {}),cert=extra_options.get(\"\"),timeout=extra_options.get(\"\"),allow_redirects=extra_options.get(\"\", True))if extra_options.get('', True):self.check_response(response)return responseexcept requests.exceptions.ConnectionError as ex:self.log.warn(str(ex) + '')raise ex", "docstring": "Grabs extra options like timeout and actually runs the request,\nchecking for the result\n\n:param session: the session to be used to execute the request\n:type session: requests.Session\n:param prepped_request: the prepared request generated in run()\n:type prepped_request: session.prepare_request\n:param extra_options: additional options to be used when executing the request\n i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX\n or 3XX status codes\n:type extra_options: dict", "id": "f9503:c0:m4"} {"signature": "def insert_rows(self, table, rows, target_fields=None, commit_every=):", "body": "if target_fields:target_fields = ''.join(target_fields)target_fields = ''.format(target_fields)else:target_fields = ''conn = self.get_conn()cur = conn.cursor()if self.supports_autocommit:cur.execute('')conn.commit()i = for row in rows:i += lst = []for cell in row:if isinstance(cell, basestring):lst.append(\"\" + str(cell).replace(\"\", \"\") + \"\")elif cell is None:lst.append('')elif type(cell) == float andnumpy.isnan(cell): lst.append('')elif isinstance(cell, numpy.datetime64):lst.append(\"\" + str(cell) + \"\")elif isinstance(cell, datetime):lst.append(\"\" +cell.strftime('') +\"\")else:lst.append(str(cell))values = tuple(lst)sql = ''''.format(table,target_fields,''.join(values))cur.execute(sql)if i % commit_every == :conn.commit()self.log.info('', i, table)conn.commit()cur.close()conn.close()self.log.info('', i)", "docstring": "A generic way to insert a set of tuples into a table,\nthe whole set of inserts is treated as one transaction\nChanges from standard DbApiHook implementation:\n\n- Oracle SQL queries in cx_Oracle can not be terminated with a semicolon (`;`)\n- Replace NaN values with NULL using `numpy.nan_to_num` (not using\n `is_nan()` because of input types error for strings)\n- Coerce datetime cells to Oracle DATETIME format during insert\n\n:param table: target Oracle table, use dot notation to target a\n specific database\n:type table: str\n:param rows: the rows to insert into the table\n:type rows: iterable of tuples\n:param target_fields: the names of the columns to fill in the table\n:type target_fields: iterable of str\n:param commit_every: the maximum number of rows to insert in one transaction\n Default 1000, Set greater than 0.\n Set 1 to insert each row in each single transaction\n:type commit_every: int", "id": "f9504:c0:m1"} {"signature": "def select_key(self, key, bucket_name=None,expression='',expression_type='',input_serialization=None,output_serialization=None):", "body": "if input_serialization is None:input_serialization = {'': {}}if 
output_serialization is None:output_serialization = {'': {}}if not bucket_name:(bucket_name, key) = self.parse_s3_url(key)response = self.get_conn().select_object_content(Bucket=bucket_name,Key=key,Expression=expression,ExpressionType=expression_type,InputSerialization=input_serialization,OutputSerialization=output_serialization)return ''.join(event[''][''].decode('')for event in response['']if '' in event)", "docstring": "Reads a key with S3 Select.\n\n:param key: S3 key that will point to the file\n:type key: str\n:param bucket_name: Name of the bucket in which the file is stored\n:type bucket_name: str\n:param expression: S3 Select expression\n:type expression: str\n:param expression_type: S3 Select expression type\n:type expression_type: str\n:param input_serialization: S3 Select input data serialization format\n:type input_serialization: dict\n:param output_serialization: S3 Select output data serialization format\n:type output_serialization: dict\n:return: retrieved subset of original data by S3 Select\n:rtype: str\n\n.. seealso::\n For more details about S3 Select parameters:\n http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content", "id": "f9505:c0:m11"} {"signature": "def check_for_prefix(self, bucket_name, prefix, delimiter):", "body": "prefix = prefix + delimiter if prefix[-] != delimiter else prefixprefix_split = re.split(r''.format(d=delimiter), prefix, )previous_level = prefix_split[]plist = self.list_prefixes(bucket_name, previous_level, delimiter)return False if plist is None else prefix in plist", "docstring": "Checks that a prefix exists in a bucket\n\n:param bucket_name: the name of the bucket\n:type bucket_name: str\n:param prefix: a key prefix\n:type prefix: str\n:param delimiter: the delimiter marks key hierarchy.\n:type delimiter: str", "id": "f9505:c0:m5"} {"signature": "def get_conn(self):", "body": "conn = self.get_connection(self.mysql_conn_id)conn_config = {\"\": conn.login,\"\": conn.password or '',\"\": conn.host or '',\"\": self.schema or conn.schema or ''}if not conn.port:conn_config[\"\"] = else:conn_config[\"\"] = int(conn.port)if conn.extra_dejson.get('', False):conn_config[\"\"] = conn.extra_dejson[\"\"]if (conn_config[\"\"]).lower() == '' or(conn_config[\"\"]).lower() == '':conn_config[\"\"] = Trueif conn.extra_dejson.get('', False):if (conn.extra_dejson[\"\"]).lower() == '':conn_config[\"\"] = MySQLdb.cursors.SSCursorelif (conn.extra_dejson[\"\"]).lower() == '':conn_config[\"\"] = MySQLdb.cursors.DictCursorelif (conn.extra_dejson[\"\"]).lower() == '':conn_config[\"\"] = MySQLdb.cursors.SSDictCursorlocal_infile = conn.extra_dejson.get('', False)if conn.extra_dejson.get('', False):dejson_ssl = conn.extra_dejson['']if isinstance(dejson_ssl, six.string_types):dejson_ssl = json.loads(dejson_ssl)conn_config[''] = dejson_sslif conn.extra_dejson.get(''):conn_config[''] = conn.extra_dejson['']if local_infile:conn_config[\"\"] = conn = MySQLdb.connect(**conn_config)return conn", "docstring": "Returns a mysql connection object", "id": "f9506:c0:m3"} {"signature": "def run_cli(self, pig, verbose=True):", "body": "with TemporaryDirectory(prefix='') as tmp_dir:with NamedTemporaryFile(dir=tmp_dir) as f:f.write(pig.encode(''))f.flush()fname = f.namepig_bin = ''cmd_extra = []pig_cmd = [pig_bin, '', fname] + cmd_extraif self.pig_properties:pig_properties_list = self.pig_properties.split()pig_cmd.extend(pig_properties_list)if verbose:self.log.info(\"\", \"\".join(pig_cmd))sp = 
subprocess.Popen(pig_cmd,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,cwd=tmp_dir,close_fds=True)self.sp = spstdout = ''for line in iter(sp.stdout.readline, b''):stdout += line.decode('')if verbose:self.log.info(line.strip())sp.wait()if sp.returncode:raise AirflowException(stdout)return stdout", "docstring": "Run an pig script using the pig cli\n\n>>> ph = PigCliHook()\n>>> result = ph.run_cli(\"ls /;\")\n>>> (\"hdfs://\" in result)\nTrue", "id": "f9508:c0:m1"} {"signature": "def get_conn(self):", "body": "effective_user = self.proxy_userautoconfig = self.autoconfiguse_sasl = configuration.conf.get('', '') == ''try:connections = self.get_connections(self.hdfs_conn_id)if not effective_user:effective_user = connections[].loginif not autoconfig:autoconfig = connections[].extra_dejson.get('',False)hdfs_namenode_principal = connections[].extra_dejson.get('')except AirflowException:if not autoconfig:raiseif autoconfig:client = AutoConfigClient(effective_user=effective_user,use_sasl=use_sasl)elif len(connections) == :client = Client(connections[].host, connections[].port,effective_user=effective_user, use_sasl=use_sasl,hdfs_namenode_principal=hdfs_namenode_principal)elif len(connections) > :nn = [Namenode(conn.host, conn.port) for conn in connections]client = HAClient(nn, effective_user=effective_user,use_sasl=use_sasl,hdfs_namenode_principal=hdfs_namenode_principal)else:raise HDFSHookException(\"\"\"\")return client", "docstring": "Returns a snakebite HDFSClient object.", "id": "f9515:c1:m1"} {"signature": "def bulk_load(self, table, tmp_file):", "body": "self.copy_expert(\"\".format(table=table), tmp_file)", "docstring": "Loads a tab-delimited file into a database table", "id": "f9517:c0:m3"} {"signature": "@staticmethoddef _serialize_cell(cell, conn):", "body": "return cell", "docstring": "Postgresql will adapt all arguments to the execute() method internally,\nhence we return cell without any conversion.\n\nSee http://initd.org/psycopg/docs/advanced.html#adapting-new-types for\nmore information.\n\n:param cell: The cell to insert into the table\n:type cell: object\n:param conn: The database connection\n:type conn: connection object\n:return: The cell\n:rtype: object", "id": "f9517:c0:m5"} {"signature": "def bulk_dump(self, table, tmp_file):", "body": "self.copy_expert(\"\".format(table=table), tmp_file)", "docstring": "Dumps a database table into a tab-delimited file", "id": "f9517:c0:m4"} {"signature": "def __handle_rate_limit_exception(self, rate_limit_exception):", "body": "retry_after = int(rate_limit_exception.response.headers.get('', ))self.log.info(\"\",retry_after)time.sleep(retry_after)", "docstring": "Sleep for the time specified in the exception. 
If not specified, wait\nfor 60 seconds.", "id": "f9519:c0:m2"} {"signature": "def get_conn(self):", "body": "conn = self.get_connection(self.sqlite_conn_id)conn = sqlite3.connect(conn.host)return conn", "docstring": "Returns a sqlite connection object", "id": "f9520:c0:m0"} {"signature": "def get_metastore_client(self):", "body": "import hmsclientfrom thrift.transport import TSocket, TTransportfrom thrift.protocol import TBinaryProtocolms = self.metastore_connauth_mechanism = ms.extra_dejson.get('', '')if configuration.conf.get('', '') == '':auth_mechanism = ms.extra_dejson.get('', '')kerberos_service_name = ms.extra_dejson.get('', '')socket = TSocket.TSocket(ms.host, ms.port)if configuration.conf.get('', '') == ''and auth_mechanism == '':try:import saslwrapper as saslexcept ImportError:import sasldef sasl_factory():sasl_client = sasl.Client()sasl_client.setAttr(\"\", ms.host)sasl_client.setAttr(\"\", kerberos_service_name)sasl_client.init()return sasl_clientfrom thrift_sasl import TSaslClientTransporttransport = TSaslClientTransport(sasl_factory, \"\", socket)else:transport = TTransport.TBufferedTransport(socket)protocol = TBinaryProtocol.TBinaryProtocol(transport)return hmsclient.HMSClient(iprot=protocol)", "docstring": "Returns a Hive thrift client.", "id": "f9521:c1:m3"} {"signature": "def get_partitions(self, schema, table_name, filter=None):", "body": "with self.metastore as client:table = client.get_table(dbname=schema, tbl_name=table_name)if len(table.partitionKeys) == :raise AirflowException(\"\")else:if filter:parts = client.get_partitions_by_filter(db_name=schema, tbl_name=table_name,filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)else:parts = client.get_partitions(db_name=schema, tbl_name=table_name,max_parts=HiveMetastoreHook.MAX_PART_COUNT)pnames = [p.name for p in table.partitionKeys]return [dict(zip(pnames, p.values)) for p in parts]", "docstring": "Returns a list of all partitions in a table. 
Works only\nfor tables with less than 32767 (java short max val).\nFor a subpartitioned table, the number might easily exceed this.\n\n>>> hh = HiveMetastoreHook()\n>>> t = 'static_babynames_partitioned'\n>>> parts = hh.get_partitions(schema='airflow', table_name=t)\n>>> len(parts)\n1\n>>> parts\n[{'ds': '2015-01-01'}]", "id": "f9521:c1:m10"} {"signature": "def get_results(self, hql, schema='', fetch_size=None, hive_conf=None):", "body": "results_iter = self._get_results(hql, schema,fetch_size=fetch_size, hive_conf=hive_conf)header = next(results_iter)results = {'': list(results_iter),'': header}return results", "docstring": "Get results of the provided hql in target schema.\n\n:param hql: hql to be executed.\n:type hql: str or list\n:param schema: target schema, defaults to 'default'.\n:type schema: str\n:param fetch_size: max size of result to fetch.\n:type fetch_size: int\n:param hive_conf: hive_conf to execute along with the hql.\n:type hive_conf: dict\n:return: results of hql execution, dict with data (list of results) and header\n:rtype: dict", "id": "f9521:c2:m3"} {"signature": "def table_exists(self, table_name, db=''):", "body": "try:self.get_table(table_name, db)return Trueexcept Exception:return False", "docstring": "Check if table exists\n\n>>> hh = HiveMetastoreHook()\n>>> hh.table_exists(db='airflow', table_name='static_babynames')\nTrue\n>>> hh.table_exists(db='airflow', table_name='does_not_exist')\nFalse", "id": "f9521:c1:m13"} {"signature": "def set_date_flag(self, date_flag=False):", "body": "self.date_flag = date_flag", "docstring": "Set date flag", "id": "f9523:c0:m7"} {"signature": "@cli_utils.action_loggingdef dag_state(args):", "body": "dag = get_dag(args)dr = DagRun.find(dag.dag_id, execution_date=args.execution_date)print(dr[].state if len(dr) > else None)", "docstring": "Returns the state of a DagRun at the command line.\n>>> airflow dag_state tutorial 2015-01-01T00:00:00.000000\nrunning", "id": "f9538:m23"} {"signature": "def run_command(command):", "body": "process = subprocess.Popen(shlex.split(command),stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)output, stderr = [stream.decode(sys.getdefaultencoding(), '')for stream in process.communicate()]if process.returncode != :raise AirflowConfigException(\"\".format(command, process.returncode, output, stderr))return output", "docstring": "Runs command and returns stdout", "id": "f9540:m2"} {"signature": "def get_pools(self):", "body": "raise NotImplementedError()", "docstring": "Get all pools.", "id": "f9541:c0:m4"} {"signature": "def create_pool(self, name, slots, description):", "body": "raise NotImplementedError()", "docstring": "Create a pool.\n\n :param name: pool name\n :param slots: pool slots amount\n :param description: pool description", "id": "f9541:c0:m5"} {"signature": "def get_dag_run_state(dag_id, execution_date):", "body": "dagbag = DagBag()if dag_id not in dagbag.dags:error_message = \"\".format(dag_id)raise DagNotFound(error_message)dag = dagbag.get_dag(dag_id)dagrun = dag.get_dagrun(execution_date=execution_date)if not dagrun:error_message = (''.format(execution_date, dag_id))raise DagRunNotFound(error_message)return {'': dagrun.get_state()}", "docstring": "Return the state of the DagRun identified by the given dag_id and execution_date.", "id": "f9547:m0"} {"signature": "def push_by_returning(**kwargs):", "body": "return value_2", "docstring": "Pushes an XCom without a specific target, just by returning it", "id": "f9563:m1"} {"signature": "def ds_format(ds, input_format, output_format):", "body": 
"return datetime.strptime(ds, input_format).strftime(output_format)", "docstring": "Takes an input string and outputs another string\nas specified in the output format\n\n:param ds: input string which contains a date\n:type ds: str\n:param input_format: input string format. E.g. %Y-%m-%d\n:type input_format: str\n:param output_format: output string format E.g. %Y-%m-%d\n:type output_format: str\n\n>>> ds_format('2015-01-01', \"%Y-%m-%d\", \"%m-%d-%y\")\n'01-01-15'\n>>> ds_format('1/5/2015', \"%m/%d/%Y\", \"%Y-%m-%d\")\n'2015-01-05'", "id": "f9577:m1"} {"signature": "def closest_ds_partition(table, ds, before=True, schema=\"\",metastore_conn_id=''):", "body": "from airflow.hooks.hive_hooks import HiveMetastoreHookif '' in table:schema, table = table.split('')hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)partitions = hh.get_partitions(schema=schema, table_name=table)if not partitions:return Nonepart_vals = [list(p.values())[] for p in partitions]if ds in part_vals:return dselse:parts = [datetime.datetime.strptime(pv, '')for pv in part_vals]target_dt = datetime.datetime.strptime(ds, '')closest_ds = _closest_date(target_dt, parts, before_target=before)return closest_ds.isoformat()", "docstring": "This function finds the date in a list closest to the target date.\nAn optional parameter can be given to get the closest before or after.\n\n:param table: A hive table name\n:type table: str\n:param ds: A datestamp ``%Y-%m-%d`` e.g. ``yyyy-mm-dd``\n:type ds: list[datetime.date]\n:param before: closest before (True), after (False) or either side of ds\n:type before: bool or None\n:returns: The closest date\n:rtype: str or None\n\n>>> tbl = 'airflow.static_babynames_partitioned'\n>>> closest_ds_partition(tbl, '2015-01-02')\n'2015-01-01'", "id": "f9578:m2"} {"signature": "def prepare_classpath():", "body": "if DAGS_FOLDER not in sys.path:sys.path.append(DAGS_FOLDER)config_path = os.path.join(AIRFLOW_HOME, '')if config_path not in sys.path:sys.path.append(config_path)if PLUGINS_FOLDER not in sys.path:sys.path.append(PLUGINS_FOLDER)", "docstring": "Ensures that certain subfolders of AIRFLOW_HOME are on the classpath", "id": "f9579:m7"} {"signature": "def get_default_executor():", "body": "global DEFAULT_EXECUTORif DEFAULT_EXECUTOR is not None:return DEFAULT_EXECUTORexecutor_name = configuration.conf.get('', '')DEFAULT_EXECUTOR = _get_executor(executor_name)log = LoggingMixin().loglog.info(\"\", executor_name)return DEFAULT_EXECUTOR", "docstring": "Creates a new instance of the configured executor if none exists and returns it", "id": "f9583:m1"} {"signature": "def __init__(self, parallelism=PARALLELISM):", "body": "self.parallelism = parallelismself.queued_tasks = OrderedDict()self.running = {}self.event_buffer = {}", "docstring": "Class to derive in order to interface with executor-type systems\nlike Celery, Yarn and the likes.\n\n:param parallelism: how many jobs should run at one time. 
Set to\n ``0`` for infinity\n:type parallelism: int", "id": "f9584:c0:m0"} {"signature": "def has_task(self, task_instance):", "body": "if task_instance.key in self.queued_tasks or task_instance.key in self.running:return True", "docstring": "Checks if a task is either queued or running in this executor\n\n:param task_instance: TaskInstance\n:return: True if the task is known to this executor", "id": "f9584:c0:m4"} {"signature": "def _num_tasks_per_send_process(self, to_send_count):", "body": "return max(,int(math.ceil( * to_send_count / self._sync_parallelism)))", "docstring": "How many Celery tasks should each worker process send.\n\n:return: Number of tasks that should be sent per process\n:rtype: int", "id": "f9585:c1:m2"} {"signature": "def poke(self, context):", "body": "raise AirflowException('')", "docstring": "Function that the sensors defined while deriving this class should\noverride.", "id": "f9599:c0:m2"} {"signature": "@property@abstractmethoddef concurrency(self):", "body": "raise NotImplementedError()", "docstring": ":return: maximum number of tasks that can run simultaneously from this DAG\n:rtype: int", "id": "f9600:c0:m3"} {"signature": "@property@abstractmethoddef task_ids(self):", "body": "raise NotImplementedError()", "docstring": ":return: A list of task IDs that are in this DAG\n:rtype: List[unicode]", "id": "f9600:c0:m1"} {"signature": "def construct_ingest_query(self, static_path, columns):", "body": "num_shards = self.num_shardstarget_partition_size = self.target_partition_sizeif self.target_partition_size == -:if self.num_shards == -:target_partition_size = DEFAULT_TARGET_PARTITION_SIZEelse:num_shards = -metric_names = [m[''] for m in self.metric_spec if m[''] != '']dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim]ingest_query_dict = {\"\": \"\",\"\": {\"\": {\"\": self.metric_spec,\"\": {\"\": self.query_granularity,\"\": self.intervals,\"\": \"\",\"\": self.segment_granularity,},\"\": {\"\": \"\",\"\": {\"\": columns,\"\": {\"\": [],\"\": dimensions, \"\": []},\"\": {\"\": self.ts_dim,\"\": \"\"},\"\": \"\"}},\"\": self.druid_datasource},\"\": {\"\": \"\",\"\": {\"\": \"\",\"\": \"\",\"\": \"\",},\"\": {\"\": \"\",\"\": target_partition_size,\"\": num_shards,},},\"\": {\"\": {\"\": static_path,\"\": \"\"},\"\": \"\"}}}if self.job_properties:ingest_query_dict[''][''][''].update(self.job_properties)if self.hadoop_dependency_coordinates:ingest_query_dict['']= self.hadoop_dependency_coordinatesreturn ingest_query_dict", "docstring": "Builds an ingest query for an HDFS TSV load.\n\n:param static_path: The path on hdfs where the data is\n:type static_path: str\n:param columns: List of all the columns that are available\n:type columns: list", "id": "f9601:c0:m2"} {"signature": "def _convert_to_float_if_possible(s):", "body": "try:ret = float(s)except (ValueError, TypeError):ret = sreturn ret", "docstring": "A small helper function to convert a string to a numeric value\nif appropriate\n\n:param s: the string to be converted\n:type s: str", "id": "f9606:m0"} {"signature": "def get_db_hook(self):", "body": "return DruidDbApiHook(druid_broker_conn_id=self.druid_broker_conn_id)", "docstring": "Return the druid db api hook.", "id": "f9619:c0:m1"} {"signature": "def template_field_role(app, typ, rawtext, text, lineno, inliner, options={}, content=[]):", "body": "text = utils.unescape(text)try:template_fields = get_template_field(app.env, text)except RoleException as e:msg = inliner.reporter.error(\"\" % (text, e, ), line=lineno)prb = 
inliner.problematic(rawtext, rawtext, msg)return [prb], [msg]node = nodes.inline(rawtext=rawtext)for i, field in enumerate(template_fields):if i != :node += nodes.Text(\"\")node += nodes.literal(field, \"\", nodes.Text(field))return [node], []", "docstring": "A role that allows you to include a list of template fields in the middle of the text. This is especially\nuseful when writing guides describing how to use the operator.\nThe result is a list of fields where each field is shorted in the literal block.\n\nSample usage::\n\n:template-fields:`airflow.contrib.operators.gcp_natural_language_operator.CloudLanguageAnalyzeSentimentOperator`\n\nFor further information look at:\n\n* [http://docutils.sourceforge.net/docs/howto/rst-roles.html](Creating reStructuredText Interpreted\n Text Roles)", "id": "f9637:m1"} {"signature": "def tube_hires(script, height=, radius=None, radius1=None, radius2=None,diameter=None, diameter1=None, diameter2=None, cir_segments=,rad_segments=, height_segments=, center=False,simple_bottom=False, color=None):", "body": "if radius is not None and diameter is None:if radius1 is None and diameter1 is None:radius1 = radiusif radius2 is None and diameter2 is None:radius2 = if diameter is not None:if radius1 is None and diameter1 is None:radius1 = diameter / if radius2 is None and diameter2 is None:radius2 = if diameter1 is not None:radius1 = diameter1 / if diameter2 is not None:radius2 = diameter2 / if radius1 is None:radius1 = if radius2 is None:radius2 = annulus_hires(script,radius1=radius1,radius2=radius2,cir_segments=cir_segments,rad_segments=rad_segments)transform.translate(script, [, , height])if simple_bottom:annulus(script,radius1=radius1,radius2=radius2,cir_segments=cir_segments)else:layers.duplicate(script)transform.translate(script, [, , -height])transform.rotate(script, '', )cylinder_open_hires(script, height, radius1,cir_segments=cir_segments,height_segments=height_segments)if radius2 != :cylinder_open_hires(script, height, radius2,cir_segments=cir_segments,height_segments=height_segments,invert_normals=True)layers.join(script)clean.merge_vert(script, threshold=)if center:transform.translate(script, [, , -height / ])if color is not None:vert_color.function(script, color=color)return None", "docstring": "Create a cylinder with user defined number of segments", "id": "f9642:m14"} {"signature": "def torus(script, major_radius=, minor_radius=, inner_diameter=None,outer_diameter=None, major_segments=, minor_segments=,color=None):", "body": "if inner_diameter is not None and outer_diameter is not None:major_radius = (inner_diameter + outer_diameter) / minor_radius = major_radius - inner_diameter / filter_xml = ''.join(['','','' % major_radius,'','','','','' % minor_radius,'','','','','' % major_segments,'','','','','' % minor_segments,'','','',''])util.write_filter(script, filter_xml)if isinstance(script, FilterScript):script.add_layer('', change_layer=True)if color is not None:vert_color.function(script, color=color)return None", "docstring": "Create a torus mesh\n\n Args:\n major_radius (float, (optional)): radius from the origin to the\n center of the cross sections\n minor_radius (float, (optional)): radius of the torus cross\n section\n inner_diameter (float, (optional)): inner diameter of torus. If\n both inner_diameter and outer_diameter are provided then\n these will override major_radius and minor_radius.,\n outer_diameter (float, (optional)): outer diameter of torus. 
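[Editorial aside, not part of the corpus] A minimal usage sketch for the torus primitive documented above. The FilterScript / save_to_file / run_script pattern is the one used by measure_section later in this file; the module path mlx.create, the file_in=None start, and the file names are assumptions added for illustration.

    import meshlabxml as mlx

    # Start a script with no input mesh; the torus primitive becomes the first layer.
    script = mlx.FilterScript(file_in=None, file_out='torus.ply', ml_version='2016.12')
    mlx.create.torus(script, major_radius=2.0, minor_radius=0.5,
                     major_segments=48, minor_segments=24, color='blue')
    script.save_to_file('make_torus.mlx')
    script.run_script(log='make_torus.log', script_file='make_torus.mlx')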
If\n both inner_diameter and outer_diameter are provided then\n these will override major_radius and minor_radius.\n major_segments (int (optional)): number of segments for the main\n ring of the torus\n minor_segments (int (optional)): number of segments for the minor\n ring of the torus\n color (str (optional)): color name to apply vertex colors to the\n newly created mesh\n\n Returns:\n None", "id": "f9642:m4"} {"signature": "def cube_open_hires_old(script, size=, x_segments=, y_segments=, z_segments=,center=False, color=None):", "body": "\"\"\"\"\"\"size = util.make_list(size, )grid(script, [size[], size[]],x_segments=x_segments,y_segments=z_segments)transform.rotate(script, '', )layers.duplicate(script)transform.rotate(script, '', )transform.translate(script, [size[], size[], ])grid(script, [size[], size[]],x_segments=z_segments,y_segments=y_segments)transform.rotate(script, '', -)layers.duplicate(script)transform.rotate(script, '', )transform.translate(script, [size[], size[], ])layers.join(script)clean.merge_vert(script, threshold=)if center:transform.translate(script, [-size[] / , -size[] / , -size[] / ])if color is not None:vert_color.function(script, color=color)return None", "docstring": "Creates a square open tube, e.g. a box with no top or bottom.\n\n Useful if you want to wrap it around and join the open ends together, forming a torus.", "id": "f9642:m8"} {"signature": "def cube(script, size=, center=False, color=None):", "body": "\"\"\"\"\"\"size = util.make_list(size, )if script.ml_version == '':filter_name = ''else:filter_name = ''filter_xml = ''.join([''.format(filter_name),'','','','','',''])util.write_filter(script, filter_xml)if isinstance(script, FilterScript):script.add_layer('', change_layer=True)transform.scale(script, value=size)if not center:transform.translate(script, value=[size[]/, size[]/, size[]/])if color is not None:vert_color.function(script, color=color)return None", "docstring": "Create a cube primitive\n\n Note that this is made of 6 quads, not triangles", "id": "f9642:m0"} {"signature": "def dna(sequence=''):", "body": "pass", "docstring": "Create doublehelix function, takes spacing between helixes and\n rungs, rung diameter", "id": "f9642:m16"} {"signature": "def annulus(script, radius=None, radius1=None, radius2=None, diameter=None,diameter1=None, diameter2=None, cir_segments=, color=None):", "body": "if radius is not None and diameter is None:if radius1 is None and diameter1 is None:radius1 = radiusif radius2 is None and diameter2 is None:radius2 = if diameter is not None:if radius1 is None and diameter1 is None:radius1 = diameter / if radius2 is None and diameter2 is None:radius2 = if diameter1 is not None:radius1 = diameter1 / if diameter2 is not None:radius2 = diameter2 / if radius1 is None:radius1 = if radius2 is None:radius2 = filter_xml = ''.join(['','','' % radius1,'','','','','' % radius2,'','','','','' % cir_segments,'','','',''])util.write_filter(script, filter_xml)if isinstance(script, FilterScript):script.add_layer('', change_layer=True)if color is not None:vert_color.function(script, color=color)return None", "docstring": "Create a 2D (surface) circle or annulus\n radius1=1 # Outer radius of the circle\n radius2=0 # Inner radius of the circle (if non-zero it creates an annulus)\n color=\"\" # specify a color name to apply vertex colors to the newly created mesh\n\n OpenSCAD: parameters: diameter overrides radius, radius1 & radius2 override radius", "id": "f9642:m6"} {"signature": "def grid(script, size=, x_segments=, y_segments=, 
center=False,color=None):", "body": "size = util.make_list(size, )filter_xml = ''.join(['','',''.format(size[]),'','','','',''.format(size[]),'','','','',''.format(x_segments + ),'','','','',''.format(y_segments + ),'','','','','','','','',''])util.write_filter(script, filter_xml)if isinstance(script, FilterScript):script.add_layer('', change_layer=True)\"\"\"\"\"\"transform.vert_function(script, z_func='')\"\"\"\"\"\"if center:transform.translate(script, value=[size[]/, -size[]/, ])else:transform.translate(script, value=[size[], , ])if color is not None:vert_color.function(script, color=color)return None", "docstring": "2D square/plane/grid created on XY plane\n\n x_segments # Number of segments in the X direction.\n y_segments # Number of segments in the Y direction.\n center=\"false\" # If true square will be centered on origin;\n otherwise it is place in the positive XY quadrant.", "id": "f9642:m5"} {"signature": "def flip(script, force_flip=False, selected=False):", "body": "filter_xml = ''.join(['','',''.format(str(force_flip).lower()),'','','','',''.format(str(selected).lower()),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Invert faces orientation, flipping the normals of the mesh.\n\n If requested, it tries to guess the right orientation; mainly it decides to\n flip all the faces if the minimum/maximum vertexes have not outward point\n normals for a few directions. Works well for single component watertight\n objects.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n force_flip (bool): If selected, the normals will always be flipped;\n otherwise, the filter tries to set them outside.\n selected (bool): If selected, only selected faces will be affected.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9644:m1"} {"signature": "def fix(script):", "body": "reorient(script)flip(script)return", "docstring": "Will reorient normals & ensure they are oriented outwards\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9644:m2"} {"signature": "def function(script, red=, green=, blue=, alpha=, color=None):", "body": "if color is not None:red, green, blue, _ = color_name[color.lower()]filter_xml = ''.join(['','',''.format(str(red).replace('', '').replace('', '')),'','','','',''.format(str(green).replace('', '').replace('', '')),'','','','',''.format(str(blue).replace('', '').replace('', '')),'','','','',''.format(str(alpha).replace('', '').replace('', '')),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Color function using muparser lib to generate new RGBA color for every\n vertex\n\n Red, Green, Blue and Alpha channels may be defined by specifying a function\n for each.\n\n See help(mlx.muparser_ref) for muparser reference documentation.\n\n It's possible to use the following per-vertex variables in the expression:\n\n Variables (per vertex):\n x, y, z (coordinates)\n nx, ny, nz (normal)\n r, g, b, a (color)\n q (quality)\n rad (radius)\n vi (vertex index)\n vtu, vtv (texture coordinates)\n ti (texture index)\n vsel (is the vertex selected? 
1 yes, 0 no)\n and all custom vertex attributes already defined by user.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n red (str [0, 255]): function to generate red component\n green (str [0, 255]): function to generate green component\n blue (str [0, 255]): function to generate blue component\n alpha (str [0, 255]): function to generate alpha component\n color (str): name of one of the 140 HTML Color Names defined\n in CSS & SVG.\n Ref: https://en.wikipedia.org/wiki/Web_colors#X11_color_names\n If not None this will override the per component variables.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9645:m0"} {"signature": "def split_vert_on_nonmanifold_face(script, vert_displacement_ratio=):", "body": "filter_xml = ''.join(['','',''.format(vert_displacement_ratio),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Split non-manifold vertices until it becomes two-manifold.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n vert_displacement_ratio (float): When a vertex is split it is moved\n along the average vector going from its position to the centroid\n of the FF connected faces sharing it.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9646:m2"} {"signature": "def close_holes(script, hole_max_edge=, selected=False,sel_new_face=True, self_intersection=True):", "body": "filter_xml = ''.join(['','',''.format(hole_max_edge),'','','','',''.format(str(selected).lower()),'','','','',''.format(str(sel_new_face).lower()),'','','','',''.format(str(self_intersection).lower()),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Close holes smaller than a given threshold\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n hole_max_edge (int): The size is expressed as number of edges composing\n the hole boundary.\n selected (bool): Only the holes with at least one of the boundary faces\n selected are closed.\n sel_new_face (bool): After closing a hole the faces that have been\n created are left selected. Any previous selection is lost. Useful\n for example for smoothing or subdividing the newly created holes.\n self_intersection (bool): When closing an holes it tries to prevent the\n creation of faces that intersect faces adjacent to the boundary of\n the hole. 
It is an heuristic, non intersecting hole filling can be\n NP-complete.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9646:m1"} {"signature": "def tex2vc_2_meshes(script, source_mesh=, target_mesh=, max_distance=):", "body": "if script.ml_version == '':filter_name = ''else:filter_name = ''filter_xml = ''.join([''.format(filter_name),'','' % source_mesh,'','','','','' % target_mesh,'','','','','' % max_distance,'','','','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Transfer texture colors to vertex colors (between 2 meshes)\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n source_mesh (int): The mesh with associated texture that we want to sample from\n target_mesh (int): The mesh whose vertex color will be filled according to source mesh texture\n max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering color\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9647:m7"} {"signature": "def vc2fc(script):", "body": "filter_xml = ''util.write_filter(script, filter_xml)return None", "docstring": "Transfer vertex colors to face colors\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.", "id": "f9647:m3"} {"signature": "def fc2vc(script):", "body": "filter_xml = ''util.write_filter(script, filter_xml)return None", "docstring": "Transfer face colors to vertex colors\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.", "id": "f9647:m2"} {"signature": "def vert_attr2tex_2_meshes(script, source_mesh=, target_mesh=, attribute=,max_distance=, tex_name='',tex_width=, tex_height=,overwrite_tex=True, assign_tex=False,fill_tex=True):", "body": "if script.ml_version == '':filter_name = ''else:filter_name = ''filter_xml = ''.join([''.format(filter_name),'','' % source_mesh,'','','','','' % target_mesh,'','','','','' % attribute,'','','','','','','','','','' % max_distance,'','','','','','','' % tex_name,'','','','','' % tex_width,'','','','','' % tex_height,'','','','','' % str(overwrite_tex).lower(),'','','','','' % str(assign_tex).lower(),'','','','','' % str(fill_tex).lower(),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Transfer Vertex Attributes to Texture (between 2 meshes)\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n source_mesh (int): The mesh that contains the source data that we want to transfer\n target_mesh (int): The mesh whose texture will be filled according to source mesh data\n attribute (int): Choose what attribute has to be transferred onto the target texture. 
You can choose between Per vertex attributes (color, normal, quality) or to transfer color information from source mesh texture\n max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering data\n tex_name (str): The texture file to be created\n tex_width (int): The texture width\n tex_height (int): The texture height\n overwrite_tex (bool): If target mesh has a texture will be overwritten (with provided texture dimension)\n assign_tex (bool): Assign the newly created texture to target mesh\n fill_tex (bool): If enabled the unmapped texture space is colored using a pull push filling algorithm, if false is set to black\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9647:m6"} {"signature": "def measure_section(fbasename=None, log=None, axis='', offset=,rotate_x_angle=None, ml_version=ml_version):", "body": "ml_script1_file = ''file_out = ''ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version)if rotate_x_angle is not None:transform.rotate(ml_script1, axis='', angle=rotate_x_angle)compute.section(ml_script1, axis=axis, offset=offset)layers.delete_lower(ml_script1)ml_script1.save_to_file(ml_script1_file)ml_script1.run_script(log=log, script_file=ml_script1_file)aabb = measure_aabb(file_out, log)return aabb", "docstring": "Measure a cross section of a mesh\n\n Perform a plane cut in one of the major axes (X, Y, Z). If you want to cut on\n a different plane you will need to rotate the model in place, perform the cut,\n and rotate it back.\n\n Args:\n fbasename (str): filename of input model\n log (str): filename of log file\n axis (str): axis perpendicular to the cutting plane, e.g. specify \"z\" to cut\n parallel to the XY plane.\n offset (float): amount to offset the cutting plane from the origin\n rotate_x_angle (float): degrees to rotate about the X axis. Useful for correcting \"Up\" direction: 90 to rotate Y to Z, and -90 to rotate Z to Y. 
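[Editorial aside, not part of the corpus] The measure_section body above shows the end-to-end pattern used throughout this corpus: build a FilterScript, queue filters, save the script, run it. The sketch below repeats that sequence for a single z = 5 slice; the submodule import style, file names, and version string are assumptions, while compute.section and layers.delete_lower are the calls taken from the body above.

    import meshlabxml as mlx
    from meshlabxml import compute, layers

    # Slice the mesh with the plane z = 5 and keep only the section layer,
    # mirroring the sequence of calls in measure_section's body above.
    script = mlx.FilterScript(file_in='part.stl', file_out='section_out.xyz',
                              ml_version='2016.12')
    compute.section(script, axis='z', offset=5.0)
    layers.delete_lower(script)   # MeshLab 2016.12 only writes layer 0, so drop the layers below
    script.save_to_file('section.mlx')
    script.run_script(log='section.log', script_file='section.mlx')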
\n\n Returns:\n dict: dictionary with the following keys for the aabb of the section:\n min (list): list of the x, y & z minimum values\n max (list): list of the x, y & z maximum values\n center (list): the x, y & z coordinates of the center of the aabb\n size (list): list of the x, y & z sizes (max - min)\n diagonal (float): the diagonal of the aabb", "id": "f9648:m1"} {"signature": "def polylinesort(fbasename=None, log=None):", "body": "fext = os.path.splitext(fbasename)[][:].strip().lower()if fext != '':print('')sys.exit()fread = open(fbasename, '')first = Truepolyline_vertices = []line_segments = []for line in fread:element, x_co, y_co, z_co = line.split()if element == '':polyline_vertices.append([util.to_float(x_co), util.to_float(y_co), util.to_float(z_co)])elif element == '':p1 = x_cop2 = y_coline_segments.append([int(p1), int(p2)])fread.close()if log is not None:log_file = open(log, '')\"\"\"\"\"\"log_file.close()return None", "docstring": "Sort separate line segments in obj format into a continuous polyline or polylines.\n NOT FINISHED; DO NOT USE\n\n Also measures the length of each polyline\n\n Return polyline and polylineMeta (lengths)", "id": "f9648:m2"} {"signature": "def measure_dimension(fbasename=None, log=None, axis1=None, offset1=,axis2=None, offset2=, ml_version=ml_version):", "body": "axis1 = axis1.lower()axis2 = axis2.lower()ml_script1_file = ''file_out = ''ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version)compute.section(ml_script1, axis1, offset1, surface=True)compute.section(ml_script1, axis2, offset2, surface=False)layers.delete_lower(ml_script1)ml_script1.save_to_file(ml_script1_file)ml_script1.run_script(log=log, script_file=ml_script1_file)for val in ('', '', ''):if val not in (axis1, axis2):axis = valaxis_num = ord(axis) - ord('')aabb = measure_aabb(file_out, log)dimension = {'': aabb[''][axis_num], '': aabb[''][axis_num],'': aabb[''][axis_num], '': axis}if log is None:print('' % fbasename)print('' % (axis, axis1, offset1,axis2, offset2))print('' % (dimension[''],dimension[''], dimension['']))else:log_file = open(log, '')log_file.write('' % fbasename)log_file.write('' % (axis, axis1, offset1,axis2, offset2))log_file.write('' % dimension[''])log_file.write('' % dimension[''])log_file.write('' % dimension[''])log_file.close()return dimension", "docstring": "Measure a dimension of a mesh", "id": "f9648:m6"} {"signature": "def scale(script, value=):", "body": "\"\"\"\"\"\"value = util.make_list(value, )vert_function(script,x_func='' % value[],y_func='' % value[],z_func='' % value[])return None", "docstring": "An alternative scale implementation that uses a geometric function.\n This is more accurate than the built-in version.", "id": "f9649:m5"} {"signature": "def deform2curve(script, curve=mp_func.torus_knot(''), step=):", "body": "curve_step = []for idx, val in enumerate(curve):curve[idx] = val.replace('', '')curve_step.append(val.replace('', ''.format(step)))tangent = mp_func.v_subtract(curve_step, curve)normal1 = mp_func.v_add(curve_step, curve)bee = mp_func.v_cross(tangent, normal1)normal = mp_func.v_cross(bee, tangent)bee = mp_func.v_normalize(bee)normal = mp_func.v_normalize(normal)new_point = mp_func.v_add(mp_func.v_multiply('', normal), mp_func.v_multiply('', bee))function = mp_func.v_add(curve, new_point)vert_function(script, x_func=function[], y_func=function[], z_func=function[])return function", "docstring": "Deform a mesh along a parametric curve function\n\n Provide a parametric curve function with z as the 
parameter. This will\n deform the xy cross section of the mesh along the curve as z increases.\n\n Source: http://blackpawn.com/texts/pqtorus/\n\n Methodology:\n T = P' - P\n N1 = P' + P\n B = T x N1\n N = B x T\n\n newPoint = point.x*N + point.y*B", "id": "f9649:m17"} {"signature": "def vert_function(script, x_func='', y_func='', z_func='', selected=False):", "body": "if script.ml_version == '':filter_xml = ''.join(['','',''.format(str(x_func).replace('', '').replace('', '')),'','','','',''.format(str(y_func).replace('', '').replace('', '')),'','','','',''.format(str(z_func).replace('', '').replace('', '')),'','','',''])else:filter_xml = ''.join(['','',''.format(str(x_func).replace('', '').replace('', '')),'','','','',''.format(str(y_func).replace('', '').replace('', '')),'','','','',''.format(str(z_func).replace('', '').replace('', '')),'','','','','' % str(selected).lower(),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Geometric function using muparser lib to generate new Coordinates\n\n You can change x, y, z for every vertex according to the function specified.\n\n See help(mlx.muparser_ref) for muparser reference documentation.\n It's possible to use the following per-vertex variables in the expression:\n\n Variables (per vertex):\n x, y, z (coordinates)\n nx, ny, nz (normal)\n r, g, b, a (color)\n q (quality)\n rad (radius)\n vi (vertex index)\n vtu, vtv (texture coordinates)\n ti (texture index)\n vsel (is the vertex selected? 1 yes, 0 no)\n and all custom vertex attributes already defined by user.\n\n Args:\n x_func (str): function to generate new coordinates for x\n y_func (str): function to generate new coordinates for y\n z_func (str): function to generate new coordinates for z\n selected (bool): if True, only affects selected vertices (ML ver 2016.12 & up)\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9649:m8"} {"signature": "def radial_flare(script, flare_radius=None, start_radius=None, end_radius=None,end_height=None):", "body": "effective_radius = ''r_func = ''z_func = ''r_func = r_func.replace('', str(effective_radius)).replace('', str(start_radius)).replace('', str(flare_radius))z_func = z_func.replace('', str(effective_radius)).replace('', str(start_radius)).replace('', str(flare_radius))function_cyl_co(script=script, r_func=r_func, z_func=z_func)return None", "docstring": "flare_radius must be >= z2 (height)\nr2 max = flare_radius + r\n\nr2 (num): radius of mesh at end of flare\n\n+15 r= 8.8205\n-15 r= 1.1795\n\nz=10, 5 +/-15 - +/-15*0.74535599249992989880305788957709", "id": "f9649:m11"} {"signature": "def butterfly(script, iterations=, edge_threshold=, selected=False):", "body": "filter_xml = ''.join(['','',''.format(iterations),'','','','',''.format(edge_threshold),'','','','','','',''.format(str(selected).lower()),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Apply Butterfly Subdivision Surface algorithm.\n\n It is an interpolated method, defined on arbitrary triangular meshes.\n The scheme is known to be C1 but not C2 on regular meshes.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n iterations (int): Number of times the model is subdivided.\n edge_threshold (float): All the edges longer than this threshold will\n be refined. 
Setting this value to zero will force a uniform\n refinement.\n selected (bool): If selected the filter is performed only on the\n selected faces.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9650:m3"} {"signature": "def catmull_clark(script):", "body": "filter_xml = ''util.write_filter(script, filter_xml)return None", "docstring": "Apply the Catmull-Clark Subdivision Surfaces.\n\n Note that position of the new vertices is simply linearly interpolated.\n If the mesh is triangle based (no faux edges) it generates a quad mesh,\n otherwise it honors the faux-edge bits.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9650:m4"} {"signature": "def curvature_flipping(script, angle_threshold=, curve_type=,selected=False):", "body": "filter_xml = ''.join(['','',''.format(str(selected).lower()),'','','','',''.format(angle_threshold),'','','','',''.format(curve_type),'','','','','','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Use the points and normals to build a surface using the Poisson\n Surface reconstruction approach.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n angle_threshold (float): To avoid excessive flipping/swapping we\n consider only couple of faces with a significant diedral angle\n (e.g. greater than the indicated threshold).\n curve_type (int): Choose a metric to compute surface curvature on vertices\n H = mean curv, K = gaussian curv, A = area per vertex\n 1: Mean curvature = H\n 2: Norm squared mean curvature = (H * H) / A\n 3: Absolute curvature:\n if(K >= 0) return 2 * H\n else return 2 * sqrt(H ^ 2 - A * K)\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9651:m5"} {"signature": "def simplify(script, texture=True, faces=, target_perc=,quality_thr=, preserve_boundary=False, boundary_weight=,optimal_placement=True, preserve_normal=False,planar_quadric=False, selected=False, extra_tex_coord_weight=,preserve_topology=True, quality_weight=False, autoclean=True):", "body": "if texture:if isinstance(script, FilterScript) and (script.ml_version == ''):filter_xml = ''else:filter_xml = ''else:if isinstance(script, FilterScript) and (script.ml_version == ''):filter_xml = ''else:filter_xml = ''filter_xml = ''.join([filter_xml,'',''.format(faces),'','','','',''.format(target_perc),'','','','',''.format(quality_thr),'','','','',''.format(str(preserve_boundary).lower()),'','','','',''.format(boundary_weight),'','','','',''.format(str(optimal_placement).lower()),'','','','',''.format(str(preserve_normal).lower()),'','','','',''.format(str(planar_quadric).lower()),'','','','',''.format(str(selected).lower()),'','',''])if texture: filter_xml = ''.join([filter_xml,'',''.format(extra_tex_coord_weight),'','',''])else: filter_xml = ''.join([filter_xml,'',''.format(str(preserve_topology).lower()),'','','','',''.format(str(quality_weight).lower()),'','','','',''.format(str(autoclean).lower()),'','',''])filter_xml = ''.join([filter_xml, ''])util.write_filter(script, filter_xml)return None", "docstring": "Simplify a mesh using a Quadric based Edge Collapse Strategy, better\n than clustering but slower. 
Optionally tries to preserve UV\n parametrization for textured meshes.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n texture (bool):\n faces (int): The desired final number of faces\n target_perc (float): If non zero, this parameter specifies the desired\n final size of the mesh as a percentage of the initial mesh size.\n quality_thr (float): Quality threshold for penalizing bad shaped faces.\n The value is in the range [0..1]0 accept any kind of face (no\n penalties), 0.5 penalize faces with quality less than 0.5,\n proportionally to their shape.\n preserve_boundary (bool): The simplification process tries not to\n affect mesh boundaries\n boundary_weight (float): The importance of the boundary during\n simplification. Default (1.0) means that the boundary has the same\n importance of the rest. Values greater than 1.0 raise boundary\n importance and has the effect of removing less vertices on the\n border. Admitted range of values (0,+inf).\n optimal_placement (bool): Each collapsed vertex is placed in the\n position minimizing the quadric error. It can fail (creating bad\n spikes) in case of very flat areas. If disabled edges are collapsed\n onto one of the two original vertices and the final mesh is\n composed by a subset of the original vertices.\n preserve_normal (bool): Try to avoid face flipping effects and try to\n preserve the original orientation of the surface.\n planar_quadric (bool): Add additional simplification constraints that\n improves the quality of the simplification of the planar portion of\n the mesh.\n selected (bool): The simplification is applied only to the selected set\n of faces. Take care of the target number of faces!\n extra_tex_coord_weight (float): Additional weight for each extra\n Texture Coordinates for every (selected) vertex. Ignored if texture\n is False.\n preserve_topology (bool): Avoid all the collapses that should cause a\n topology change in the mesh (like closing holes, squeezing handles,\n etc). If checked the genus of the mesh should stay unchanged.\n quality_weight (bool): Use the Per-Vertex quality as a weighting factor\n for the simplification. 
The weight is used as a error amplification\n value, so a vertex with a high quality value will not be simplified\n and a portion of the mesh with low quality values will be\n aggressively simplified.\n autoclean (bool): After the simplification an additional set of steps\n is performed to clean the mesh (unreferenced vertices, bad faces,\n etc).\n\n Layer stack:\n Unchanged; current mesh is simplified in place.\n\n MeshLab versions:\n 2016.12 (different filter name)\n 1.3.4BETA", "id": "f9651:m0"} {"signature": "def cylindrical_vert(script, radius=, inside=True):", "body": "if inside:function = ''.format(radius)else:function = ''.format(radius)vert_function(script, function=function)return None", "docstring": "Select all vertices within a cylindrical radius\n\n Args:\n radius (float): radius of the sphere\n center_pt (3 coordinate tuple or list): center point of the sphere\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9652:m13"} {"signature": "def grow(script, iterations=):", "body": "filter_xml = ''for _ in range(iterations):util.write_filter(script, filter_xml)return None", "docstring": "Grow (dilate, expand) the current set of selected faces\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n iterations (int): the number of times to grow the selection.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9652:m4"} {"signature": "def none(script, face=True, vert=True):", "body": "filter_xml = ''.join(['','',''.format(str(face).lower()),'','','','',''.format(str(vert).lower()),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Clear the current set of selected faces\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n faces (bool): If True the filter will deselect all the faces.\n verts (bool): If True the filter will deselect all the vertices.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9652:m1"} {"signature": "def spherical_vert(script, radius=, center_pt=(, , )):", "body": "function = ''.format(center_pt[], center_pt[], center_pt[], radius)vert_function(script, function=function)return None", "docstring": "Select all vertices within a spherical radius\n\n Args:\n radius (float): radius of the sphere\n center_pt (3 coordinate tuple or list): center point of the sphere\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9652:m14"} {"signature": "def vert_function(script, function='', strict_face_select=True):", "body": "if script.ml_version == '':strict_select = ''.join(['',''.format(str(strict_face_select).lower()),'','','',])else:strict_select = ''filter_xml = ''.join(['','',''.format(str(function).replace('', '').replace('', '')),'','','',strict_select,''])util.write_filter(script, filter_xml)return None", "docstring": "Boolean function using muparser lib to perform vertex selection over current mesh.\n\n See help(mlx.muparser_ref) for muparser reference documentation.\n\n It's possible to use parenthesis, per-vertex variables and boolean operator:\n (, ), and, or, <, >, =\n It's possible to use the following per-vertex variables in the expression:\n\n Variables:\n x, y, z (coordinates)\n nx, ny, nz (normal)\n r, g, b, a (color)\n q (quality)\n rad\n vi (vertex index)\n vtu, vtv (texture coordinates)\n ti (texture index)\n vsel (is the vertex selected? 
1 yes, 0 no)\n and all custom vertex attributes already defined by user.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter] to.\n function (str): a boolean function that will be evaluated in order\n to select a subset of vertices. Example: (y > 0) and (ny > 0)\n strict_face_select (bool): if True a face is selected if ALL its\n vertices are selected. If False a face is selected if at least\n one of its vertices is selected. ML v1.3.4BETA only; this is\n ignored in 2016.12. In 2016.12 only vertices are selected.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9652:m12"} {"signature": "def invert(script, face=True, vert=True):", "body": "filter_xml = ''.join(['','',''.format(str(face).lower()),'','','','',''.format(str(vert).lower()),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Invert the current set of selected faces\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n faces (bool): If True the filter will invert the selected faces.\n verts (bool): If True the filter will invert the selected vertices.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9652:m2"} {"signature": "def small_parts(script, ratio=, non_closed_only=False):", "body": "filter_xml = ''.join(['','',''.format(ratio),'','','','',''.format(str(non_closed_only).lower()),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Select the small disconnected parts (components) of a mesh.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n ratio (float): This ratio (between 0 and 1) defines the meaning of\n 'small' as the threshold ratio between the number of faces of the\n largest component and the other ones. A larger value will select\n more components.\n non_closed_only (bool): Select only non-closed components.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9652:m9"} {"signature": "def taubin(script, iterations=, t_lambda=, t_mu=-, selected=False):", "body": "filter_xml = ''.join(['','',''.format(t_lambda),'','','','',''.format(t_mu),'','','','',''.format(iterations),'','','','',''.format(str(selected).lower()),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "The lambda & mu Taubin smoothing, it make two steps of smoothing, forth\n and back, for each iteration.\n\n Based on:\n Gabriel Taubin\n \"A signal processing approach to fair surface design\"\n Siggraph 1995\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n iterations (int): The number of times that the taubin smoothing is\n iterated. 
Usually it requires a larger number of iteration than the\n classical laplacian.\n t_lambda (float): The lambda parameter of the Taubin Smoothing algorithm\n t_mu (float): The mu parameter of the Taubin Smoothing algorithm\n selected (bool): If selected the filter is performed only on the\n selected faces\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9653:m2"} {"signature": "def hc_laplacian(script):", "body": "filter_xml = ''util.write_filter(script, filter_xml)return None", "docstring": "HC Laplacian Smoothing, extended version of Laplacian Smoothing, based\n on the paper of Vollmer, Mencl, and Muller\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9653:m1"} {"signature": "def laplacian(script, iterations=, boundary=True, cotangent_weight=True,selected=False):", "body": "filter_xml = ''.join(['','',''.format(iterations),'','','','',''.format(str(boundary).lower()),'','','','',''.format(str(cotangent_weight).lower()),'','','','',''.format(str(selected).lower()),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Laplacian smooth of the mesh: for each vertex it calculates the average\n position with nearest vertex\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n iterations (int): The number of times that the whole algorithm (normal\n smoothing + vertex fitting) is iterated.\n boundary (bool): If true the boundary edges are smoothed only by\n themselves (e.g. the polyline forming the boundary of the mesh is\n independently smoothed). Can reduce the shrinking on the border but\n can have strange effects on very small boundaries.\n cotangent_weight (bool): If True the cotangent weighting scheme is\n computed for the averaging of the position. Otherwise (False) the\n simpler umbrella scheme (1 if the edge is present) is used.\n selected (bool): If selected the filter is performed only on the\n selected faces\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9653:m0"} {"signature": "def join(script, merge_visible=True, merge_vert=False, delete_layer=True,keep_unreferenced_vert=False):", "body": "filter_xml = ''.join(['','',''.format(str(merge_visible).lower()),'','','','',''.format(str(merge_vert).lower()),'','','','',''.format(str(delete_layer).lower()),'','','','',''.format(str(keep_unreferenced_vert).lower()),'','','',''])util.write_filter(script, filter_xml)if isinstance(script, mlx.FilterScript):script.add_layer('')if delete_layer:for i in range(script.last_layer()):script.del_layer()return None", "docstring": "Flatten all or only the visible layers into a single new mesh.\n\n Transformations are preserved. Existing layers can be optionally\n deleted.\n\n Args:\n script: the mlx.FilterScript object or script filename to write\n the filter to.\n merge_visible (bool): merge only visible layers\n merge_vert (bool): merge the vertices that are duplicated among\n different layers. Very useful when the layers are spliced portions\n of a single big mesh.\n delete_layer (bool): delete all the merged layers. If all layers are\n visible only a single layer will remain after the invocation of\n this filter.\n keep_unreferenced_vert (bool): Do not discard unreferenced vertices\n from source layers. 
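[Editorial aside, not part of the corpus] A sketch combining the selection and smoothing filters documented above: select vertices with the muparser expression given in the vert_function docstring, then run Laplacian smoothing only on that selection. The module name smooth and the file names are assumptions; select.vert_function and laplacian use the signatures shown in the records above.

    import meshlabxml as mlx
    from meshlabxml import select, smooth   # 'smooth' module name is an assumption

    script = mlx.FilterScript(file_in='noisy.ply', file_out='smoothed.ply',
                              ml_version='2016.12')
    # Select upward-facing vertices in the upper half (example expression from the
    # vert_function docstring), then smooth only the selection.
    select.vert_function(script, function='(y > 0) and (ny > 0)')
    smooth.laplacian(script, iterations=3, selected=True)
    script.save_to_file('smooth_sel.mlx')
    script.run_script(log='smooth.log', script_file='smooth_sel.mlx')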
Necessary for point-only layers.\n\n Layer stack:\n Creates a new layer \"Merged Mesh\"\n Changes current layer to the new layer\n Optionally deletes all other layers\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n\n Bugs:\n UV textures: not currently preserved, however will be in a future\n release. https://github.com/cnr-isti-vclab/meshlab/issues/128\n merge_visible: it is not currently possible to change the layer\n visibility from meshlabserver, however this will be possible\n in the future https://github.com/cnr-isti-vclab/meshlab/issues/123", "id": "f9655:m0"} {"signature": "def rename(script, label='', layer_num=None):", "body": "filter_xml = ''.join(['','',''.format(label),'','','',''])if isinstance(script, mlx.FilterScript):if (layer_num is None) or (layer_num == script.current_layer()):util.write_filter(script, filter_xml)script.layer_stack[script.current_layer()] = labelelse:cur_layer = script.current_layer()change(script, layer_num)util.write_filter(script, filter_xml)change(script, cur_layer)script.layer_stack[layer_num] = labelelse:util.write_filter(script, filter_xml)return None", "docstring": "Rename layer label\n\n Can be useful for outputting mlp files, as the output file names use\n the labels.\n\n Args:\n script: the mlx.FilterScript object or script filename to write\n the filter to.\n label (str): new label for the mesh layer\n layer_num (int): layer number to rename. Default is the\n current layer. Not supported on the file base API.\n\n Layer stack:\n Renames a layer\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9655:m2"} {"signature": "def delete_lower(script, layer_num=None):", "body": "if layer_num is None:layer_num = script.current_layer()if layer_num != :change(script, )for i in range(layer_num):delete(script, )return None", "docstring": "Delete all layers below the specified one.\n\n Useful for MeshLab ver 2016.12, whcih will only output layer 0.", "id": "f9655:m6"} {"signature": "def selected(script, face=True, vert=True):", "body": "if face and vert:filter_xml = ''elif face and not vert:filter_xml = ''elif not face and vert:filter_xml = ''util.write_filter(script, filter_xml)return None", "docstring": "Delete selected vertices and/or faces\n\n Note: if the mesh has no faces (e.g. a point cloud) you must\n set face=False, or the vertices will not be deleted\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n face (bool): if True the selected faces will be deleted. If vert\n is also True, then all the vertices surrounded by those faces will\n also be deleted. Note that if no faces are selected (only vertices)\n then this filter will not do anything. For example, if you want to\n delete a point cloud selection, you must set this to False.\n vert (bool): if True the selected vertices will be deleted.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9656:m3"} {"signature": "def nonmanifold_edge(script):", "body": "select.nonmanifold_edge(script)selected(script, face=False)return None", "docstring": "Select & delete the faces and the vertices incident on\n non manifold edges (e.g. edges where more than two faces are incident).\n\n Note that this function selects the components that are related to\n non manifold edges. 
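[Editorial aside, not part of the corpus] The integer literals in the delete_lower body above were stripped during extraction. The sketch below fills them in on the assumption that they are all 0 (the bottom of the layer stack), which matches the docstring's description; change and delete are the sibling layer helpers referenced by the original body.

    def delete_lower(script, layer_num=None):
        """Delete all layers below the specified one (default: the current layer)."""
        if layer_num is None:
            layer_num = script.current_layer()
        if layer_num != 0:
            change(script, 0)          # jump to the bottom layer first
        for _ in range(layer_num):
            delete(script, 0)          # deleting layer 0 layer_num times removes everything below
        return None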
The case of non manifold vertices is specifically\n managed by nonmanifold_vert.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9656:m1"} {"signature": "def nonmanifold_vert(script):", "body": "select.nonmanifold_vert(script)selected(script, face=False)return None", "docstring": "Select & delete the non manifold vertices that do not belong to\n non manifold edges.\n\n For example two cones connected by their apex. Vertices incident on\n non manifold edges are ignored.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9656:m0"} {"signature": "def poisson_disk(script, sample_num=, radius=,montecarlo_rate=, save_montecarlo=False,approx_geodesic_dist=False, subsample=False, refine=False,refine_layer=, best_sample=True, best_sample_pool=,exact_num=False, radius_variance=):", "body": "filter_xml = ''.join(['','',''.format(sample_num),'','','','',''.format(radius),'','','','','','',''.format(montecarlo_rate),'','','','',''.format(str(save_montecarlo).lower()),'','','','',''.format(str(approx_geodesic_dist).lower()),'','','','',''.format(str(subsample).lower()),'','','','',''.format(str(refine).lower()),'','','','',''.format(refine_layer),'','','','',''.format(str(best_sample).lower()),'','','','',''.format(best_sample_pool),'','','','',''.format(str(exact_num).lower()),'','','','',''.format(radius_variance),'','','',''])util.write_filter(script, filter_xml)if isinstance(script, FilterScript):script.add_layer('')if save_montecarlo:script.add_layer('')return None", "docstring": "Create a new layer populated with a point sampling of the current mesh.\n\n Samples are generated according to a Poisson-disk distribution, using the\n algorithm described in:\n\n 'Efficient and Flexible Sampling with Blue Noise Properties of Triangular Meshes'\n Massimiliano Corsini, Paolo Cignoni, Roberto Scopigno\n IEEE TVCG 2012\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n sample_num (int): The desired number of samples. The radius of the disk\n is calculated according to the sampling density.\n radius (float): If not zero this parameter overrides the previous\n parameter to allow exact radius specification.\n montecarlo_rate (int): The over-sampling rate that is used to generate\n the intial Monte Carlo samples (e.g. if this parameter is 'K' means\n that 'K * sample_num' points will be used). The generated\n Poisson-disk samples are a subset of these initial Monte Carlo\n samples. Larger numbers slow the process but make it a bit more\n accurate.\n save_montecarlo (bool): If True, it will generate an additional Layer\n with the Monte Carlo sampling that was pruned to build the Poisson\n distribution.\n approx_geodesic_dist (bool): If True Poisson-disk distances are\n computed using an approximate geodesic distance, e.g. an Euclidean\n distance weighted by a function of the difference between the\n normals of the two points.\n subsample (bool): If True the original vertices of the base mesh are\n used as base set of points. In this case the sample_num should be\n obviously much smaller than the original vertex number. 
Note that\n this option is very useful in the case you want to subsample a\n dense point cloud.\n refine (bool): If True the vertices of the refine_layer mesh layer are\n used as starting vertices, and they will be utterly refined by\n adding more and more points until possible.\n refine_layer (int): Used only if refine is True.\n best_sample (bool): If True it will use a simple heuristic for choosing\n the samples. At a small cost (it can slow the process a bit) it\n usually improves the maximality of the generated sampling.\n best_sample_pool (bool): Used only if best_sample is True. It controls\n the number of attempts that it makes to get the best sample. It is\n reasonable that it is smaller than the Monte Carlo oversampling\n factor.\n exact_num (bool): If True it will try to do a dicotomic search for the\n best Poisson-disk radius that will generate the requested number of\n samples with a tolerance of the 0.5%. Obviously it takes much\n longer.\n radius_variance (float): The radius of the disk is allowed to vary\n between r and r*var. If this parameter is 1 the sampling is the\n same as the Poisson-disk Sampling.\n\n Layer stack:\n Creates new layer 'Poisson-disk Samples'. Current layer is NOT changed\n to the new layer (see Bugs).\n If save_montecarlo is True, creates a new layer 'Montecarlo Samples'.\n Current layer is NOT changed to the new layer (see Bugs).\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n\n Bugs:\n Current layer is NOT changed to the new layer, which is inconsistent\n with the majority of filters that create new layers.", "id": "f9657:m1"} {"signature": "def clustered_vert(script, cell_size=, strategy='', selected=False):", "body": "if strategy.lower() == '':strategy_num = elif strategy.lower() == '':strategy_num = filter_xml = ''.join(['','',''.format(cell_size),'','','','','','',''.format(strategy_num),'','','','','','','',''.format(str(selected).lower()),'','','',''])util.write_filter(script, filter_xml)if isinstance(script, FilterScript):script.add_layer('')return None", "docstring": "Create a new layer populated with a subsampling of the vertexes of the\n current mesh\n\n The subsampling is driven by a simple one-per-gridded cell strategy.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n cell_size (float): The size of the cell of the clustering grid. Smaller the cell finer the resulting mesh. For obtaining a very coarse mesh use larger values.\n strategy (enum 'AVERAGE' or 'CENTER'): <b>Average</b>: for each cell we take the average of the sample falling into. The resulting point is a new point.<br><b>Closest to center</b>: for each cell we take the sample that is closest to the center of the cell. Choosen vertices are a subset of the original ones.\n selected (bool): If true only for the filter is applied only on the selected subset of the mesh.\n\n Layer stack:\n Creates new layer 'Cluster Samples'. 
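[Editorial aside, not part of the corpus] A sketch of the point-cloud subsampling case called out in the poisson_disk docstring above. The module name sampling and the file names are assumptions; the sample_num/subsample parameters and the layers.change workaround follow the parameter list and the bug note in the record above.

    import meshlabxml as mlx
    from meshlabxml import sampling, layers   # 'sampling' module name is an assumption

    script = mlx.FilterScript(file_in='dense_scan.ply', file_out='sparse_scan.ply',
                              ml_version='2016.12')
    sampling.poisson_disk(script, sample_num=10000, subsample=True)
    # Per the bug note above, the new 'Poisson-disk Samples' layer is not made
    # current automatically, so switch to it before writing the output.
    layers.change(script, script.last_layer())
    script.save_to_file('subsample.mlx')
    script.run_script(log='subsample.log', script_file='subsample.mlx')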
Current layer is changed to the new\n layer.\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9657:m3"} {"signature": "def param_texture_from_rasters(script, textName=\"\", texsize=,colorCorrection=True, colorCorrectionFilterSize=,useDistanceWeight=True, useImgBorderWeight=True,useAlphaWeight=False, cleanIsolatedTriangles=True,stretchingAllowed=False, textureGutter=):", "body": "filter_xml = ''.join(['','','' % texsize,'','','','','','' % textName,'','','','','','' % str(colorCorrection).lower(),'','','','','','' % colorCorrectionFilterSize,'','','','','','' % str(useDistanceWeight).lower(),'','','','','','' % str(useImgBorderWeight).lower(),'','','','','','' % str(useAlphaWeight).lower(),'','','','','','' % str(cleanIsolatedTriangles).lower(),'','','','','','' % str(stretchingAllowed).lower(),'','','','','','' % textureGutter,'','','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Set texture", "id": "f9658:m11"} {"signature": "def project_rasters(script, tex_file_out=\"\", tex_size=,fill_atlas_gaps=False, depth_threshold=,selected=False, use_angle=True, use_distance=True,use_borders=True, use_silhouettes=True, use_alpha=False):", "body": "filter_xml = ''.join(['','','' % tex_file_out,'','','','','','' % tex_size,'','','','','','' % str(fill_atlas_gaps).lower(),'','','','','','' % depth_threshold,'','','','','','' % str(selected).lower(),'','','','','','' % str(use_angle).lower(),'','','','','','' % str(use_distance).lower(),'','','','','','' % str(use_borders).lower(),'','','','','','' % str(use_silhouettes).lower(),'','','','','','' % str(use_alpha).lower(),'','','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Set texture\n\n Creates new texture file\n tex_file_out = must be png\n fill_atlas_gaps = setting this to false will leave the unprojected area transparent. 
This can then be easily composed with the original texture with PIL", "id": "f9658:m10"} {"signature": "def set_texture(script, textName=\"\", textDim=):", "body": "filter_xml = ''.join(['','','' % textName,'','','','','','' % textDim,'','','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Set texture", "id": "f9658:m9"} {"signature": "def per_triangle(script, sidedim=, textdim=, border=, method=):", "body": "filter_xml = ''.join(['','','' % sidedim,'','','','','','' % textdim,'','','','','','' % border,'','','','','','' % method,'','','','','','''',''])util.write_filter(script, filter_xml)return None", "docstring": "Trivial Per-Triangle parameterization", "id": "f9658:m1"} {"signature": "def isometric_load(script, AbsName=\"\"):", "body": "filter_xml = ''.join(['','','' % AbsName,'','','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Isometric parameterization: Load Abstract Domain", "id": "f9658:m6"} {"signature": "def isometric_remesh(script, SamplingRate=):", "body": "filter_xml = ''.join(['','','' % SamplingRate,'','','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Isometric parameterization: remeshing", "id": "f9658:m8"} {"signature": "def isometric_transfer(script, sourceMesh=, targetMesh=):", "body": "filter_xml = ''.join(['','','' % sourceMesh,'','','','','','' % targetMesh,'','','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Isometric parameterization: transfer between meshes\n\n Provide the layer numbers of the source and target meshes.", "id": "f9658:m7"} {"signature": "def vert_attr(script, name='', function=''):", "body": "filter_xml = ''.join(['','',''.format(name),'','','','',''.format(str(function).replace('', '').replace('', '')),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Add a new Per-Vertex scalar attribute to current mesh and fill it with\n the defined function.\n\n The specified name can be used in other filter functions.\n\n It's possible to use parenthesis, per-vertex variables and boolean operator:\n (, ), and, or, <, >, =\n It's possible to use the following per-vertex variables in the expression:\n\n Variables:\n x, y, z (coordinates)\n nx, ny, nz (normal)\n r, g, b, a (color)\n q (quality)\n rad\n vi (vertex index)\n ?vtu, vtv (texture coordinates)\n ?ti (texture index)\n ?vsel (is the vertex selected? 1 yes, 0 no)\n and all custom vertex attributes already defined by user.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter] to.\n name (str): the name of new attribute. 
You can access attribute in\n other filters through this name.\n function (str): function to calculate custom attribute value for each\n vertex\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9659:m11"} {"signature": "def mp_atan2(y, x):", "body": "return ''.replace('', str(math.pi)).replace('', y).replace('', x)", "docstring": "muparser atan2 function\n\n Implements an atan2(y,x) function for older muparser versions (<2.1.0);\n atan2 was added as a built-in function in muparser 2.1.0\n\n Args:\n y (str): y argument of the atan2(y,x) function\n x (str): x argument of the atan2(y,x) function\n\n Returns:\n A muparser string that calculates atan2(y,x)", "id": "f9659:m1"} {"signature": "def face_attr(script, name='', function=''):", "body": "filter_xml = ''.join(['','',''.format(name),'','','','',''.format(str(function).replace('', '').replace('', '')),'','','',''])util.write_filter(script, filter_xml)return None", "docstring": "Add a new Per-Face attribute to current mesh and fill it with\n the defined function..\n\n The specified name can be used in other filter functions.\n\n It's possible to use parenthesis, per-face variables and boolean operator:\n (, ), and, or, <, >, =\n\n It's possible to use per-face variables like attributes associated to the\n three vertices of every face.\n\n It's possible to use the following per-face variables in the expression:\n\n Variables:\n x0,y0,z0 (first vertex); x1,y1,z1 (second vertex); x2,y2,z2 (third vertex)\n nx0,ny0,nz0; nx1,ny1,nz1; nx2,ny2,nz2 (normals)\n r0,g0,b0 (color)\n q0,q1,q2 (quality)\n wtu0, wtv0; wtu1, wtv1; wtu2, wtv2 (per wedge tex coord)\n fi (face index)\n ?fsel (is the vertex selected? 1 yes, 0 no)\n fi (face index)\n and all custom face attributes already defined by user.\n\n Args:\n script: the FilterScript object or script filename to write\n the filter] to.\n name (str): the name of new attribute. You can access attribute in\n other filters through this name.\n function (str): function to calculate custom attribute value for each\n face\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9659:m12"} {"signature": "def section(script, axis='', offset=, surface=False, custom_axis=None,planeref=):", "body": "if axis.lower() == '':axis_num = axis_name = ''elif axis.lower() == '':axis_num = axis_name = ''elif axis.lower() == '':axis_num = axis_name = ''else: axis_num = axis_name = ''if custom_axis is None:print('','')if custom_axis is None:custom_axis = (, , )filter_xml = ''.join(['','',''.format(axis_num),'','','','','','','','','',''.format(custom_axis[], custom_axis[],custom_axis[]),'','','','',''.format(offset),'','','','',''.format(planeref),'','','','','','','','',''.format(str(surface).lower()),'','','',''])util.write_filter(script, filter_xml)if isinstance(script, mlx.FilterScript):current_layer_label = script.layer_stack[script.current_layer()]script.add_layer(''.format(current_layer_label, axis_name,int(offset)))if surface:script.add_layer(''.format(current_layer_label,axis_name, int(offset)))return None", "docstring": "Compute the polyline representing a planar section (a slice) of a mesh.\n\n If the resulting polyline is closed the result can be filled with a\n triangular mesh representing the section.\n\n Args:\n script: the mlx.FilterScript object or script filename to write\n the filter to.\n axis (str): The slicing plane is perpendicular to this axis. 
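As a small illustration of how mp_atan2 and vert_attr above can work together, here is a hedged sketch that stores the polar angle of each vertex as a custom attribute. The 'script' object and the attribute name 'angle' are assumptions for illustration; the module path of these helpers is not shown in this file, so the import is omitted.

    # Hypothetical sketch: add a per-vertex scalar attribute holding the polar angle
    # of each vertex in the x-y plane. mp_atan2 builds a muparser string, so this
    # also works with muparser versions older than 2.1.0.
    angle_expr = mp_atan2('y', 'x')                        # expands to an atan2(y, x) expression string
    vert_attr(script, name='angle', function=angle_expr)   # 'script' is an existing FilterScript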
Accepted\n values are 'x', 'y', or 'z'; any other input will be interpreted\n as a custom axis (although using 'custom' is recommended\n for clarity). Upper or lowercase values are accepted.\n offset (float): Specify an offset of the cross-plane. The offset\n corresponds to the distance along 'axis' from the point specified\n in 'planeref'.\n surface (bool): If True, in addition to a layer with the section\n polyline, also a layer with a triangulated version of the section\n polyline will be created. This only works if the section polyline\n is closed.\n custom_axis (3 component list or tuple): Specify a custom axis as\n a 3 component vector (x, y, z); this is ignored unless 'axis' is\n set to 'custom'.\n planeref (int): Specify the reference from which the planes are\n shifted. Valid values are:\n 0 - Bounding box center\n 1 - Bounding box min\n 2 - Origin (default)\n\n Layer stack:\n Creates a new layer '{label}_sect_{axis_name}_{offset}', where\n 'axis_name' is one of [X, Y, Z, custom] and 'offset' is the\n truncated 'offset' value\n If surface is True, creates a new layer '{label}_sect_{axis}_{offset}_mesh'\n Current layer is changed to the last (newly created) layer\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA", "id": "f9660:m0"} {"signature": "def parse_hausdorff(ml_log, log=None, print_output=False):", "body": "hausdorff_distance = {\"\": ,\"\": ,\"\": ,\"\": ,\"\": }with open(ml_log) as fread:result = fread.readlines()data = \"\"for idx, line in enumerate(result):m = re.match(r\"\", line)if m is not None:hausdorff_distance[\"\"] = int(m.group())if '' in line:data = result[idx + ]m = re.match(r\"\", data)hausdorff_distance[\"\"] = float(m.group())hausdorff_distance[\"\"] = float(m.group())hausdorff_distance[\"\"] = float(m.group())hausdorff_distance[\"\"] = float(m.group())for key, value in hausdorff_distance.items():if log is not None:log_file = open(log, '')log_file.write(''.format(key, value))log_file.close()elif print_output:print(''.format(key, value))return hausdorff_distance", "docstring": "Parse the ml_log file generated by the hausdorff_distance function.\n\n Args:\n ml_log (str): MeshLab log file to parse\n log (str): filename to log output\n\n Returns:\n dict: dictionary with the following keys:\n number_points (int): number of points in mesh\n min_distance (float): minimum hausdorff distance\n max_distance (float): maximum hausdorff distance\n mean_distance (float): mean hausdorff distance\n rms_distance (float): root mean square distance", "id": "f9660:m5"} {"signature": "def find_texture_files(fbasename, log=None):", "body": "fext = os.path.splitext(fbasename)[][:].strip().lower()material_file = Nonetexture_files = []vert_colors = Falseface_colors = Falseif fext == '':with open(fbasename, '') as fread:for line in fread:if '' in line:material_file = os.path.basename(line.split()[])breakif material_file is not None:with open(material_file, '') as fread:for line in fread:if '' in line:texture_files.append(os.path.basename(line.split()[]))elif fext == '':face_element = Falsewith open(fbasename, '') as fread:while True:line = fread.readline().strip().decode('')if '' in line:face_element = Trueif '' in line:if face_element:face_colors = Trueelse:vert_colors = Trueif '' in line:texture_files.append(os.path.basename(line.split()[]))if '' in line:breakelif fext == '': namespace = ''tree = ET.parse(fbasename)for elem in tree.findall('' % (namespace, namespace, namespace)):texture_files.append(elem.text)elif fext == '':tree = ET.parse(fbasename)for elem in
tree.iter(tag=''):texture_files.append(elem.attrib[''])elif fext == '':with open(fbasename, '') as fread:for line in fread:if '' in line:texture_files.append(os.path.basename(line.split('')[]))elif fext != '': print('' % fext)texture_files_unique = list(set(texture_files))if log is not None:log_file = open(log, '')log_file.write('')log_file.write('' % fbasename)log_file.write('' % texture_files)log_file.write('' % texture_files_unique)log_file.write('' % len(texture_files))log_file.write('' %len(texture_files_unique))log_file.write('' % vert_colors)log_file.write('' % face_colors)log_file.close()colors = {'':bool(texture_files), '':vert_colors, '':face_colors}return texture_files, texture_files_unique, material_file, colors", "docstring": "Finds the filenames of the referenced texture file(s) (and material\n file for obj) for the mesh.\n\n Args:\n fbasename (str): input filename. Supported file extensions:\n obj\n ply\n dae\n x3d\n wrl\n log (str): filename to log output\n\n Returns:\n list: list of all of the texture filenames referenced by the input file.\n May contain duplicates if the texture files are referenced more\n than once. List is empty if no texture files are found.\n list: list of all of the unique texture filenames, also empty if no\n texture files are found.\n str: for obj files only, returns the name of the referenced material file.\n Returns None if no material file is found.", "id": "f9661:m2"} {"signature": "def create_mlp(file_out, mlp_mesh=None, mlp_raster=None):", "body": "mlp_file = open(file_out, '')mlp_file.write(''.join(['','']))mlp_file.close()if mlp_mesh is not None:mlp_file = open(file_out, '')mlp_file.write('')for i, val in enumerate(mlp_mesh):if '' not in mlp_mesh[i]:mlp_mesh[i][''] = mlp_mesh[i]['']if '' not in mlp_mesh[i]:mlp_mesh[i][''] = [[, , , ], [, , , ], [, , , ], [, , , ]]mlp_file.write(''.format(mlp_mesh[i][''], mlp_mesh[i]['']))mlp_file.write(''.join(['',''.format(m=mlp_mesh[i][''][]),''.format(m=mlp_mesh[i][''][]),''.format(m=mlp_mesh[i][''][]),''.format(m=mlp_mesh[i][''][]),'','']))mlp_file.write('')mlp_file.close()else:mlp_file = open(file_out, '')mlp_file.write('')mlp_file.close()if mlp_raster is not None:mlp_file = open(file_out, '')mlp_file.write('')for i, val in enumerate(mlp_raster):if '' not in mlp_raster[i]:mlp_raster[i][''] = mlp_raster[i]['']if '' not in mlp_raster[i]:mlp_raster[i][''] = if '' not in mlp_raster[i]['']:mlp_raster[i][''][''] = [, ]if '' not in mlp_raster[i]['']:mlp_raster[i][''][''] = [int(mlp_raster[i][''][''][]/), int(mlp_raster[i][''][''][]/)]mlp_file.write(''.format(mlp_raster[i]['']))mlp_file.write(''.join(['',''.format(m=mlp_raster[i]['']['']),''.format(m=mlp_raster[i]['']['']),''.format(mlp_raster[i]['']['']),''.format(m=mlp_raster[i]['']['']),''.format(m=mlp_raster[i]['']['']),''.format(m=mlp_raster[i]['']['']),''.format(m=mlp_raster[i]['']['']),'']))mlp_file.write(''.format(mlp_raster[i][''], mlp_raster[i]['']))mlp_file.write('')mlp_file.write('')mlp_file.close()else:mlp_file = open(file_out, '')mlp_file.write('')mlp_file.close()mlp_file = open(file_out, '')mlp_file.write('')mlp_file.close()return", "docstring": "Create mlp file\n mlp_mesh (list containing dictionary)\n filename*\n label\n matrix\n\n mlp_raster\n filename*\n label\n semantic\n camera\n trans_vector*\n rotation_matrix*\n focal_length*\n image_px*\n image_res_mm_per_px*\n lens_distortion\n center_px\n * Required\n\n http://vcg.isti.cnr.it/~cignoni/newvcglib/html/shot.html", "id": "f9661:m6"} {"signature": "def add_layer(self, label, 
change_layer=True):", "body": "self.layer_stack.insert(self.last_layer() + , label)if change_layer:self.set_current_layer(self.last_layer())return None", "docstring": "Add new mesh layer to the end of the stack\n\n Args:\n label (str): new label for the mesh layer\n change_layer (bool): change to the newly created layer", "id": "f9661:c0:m4"} {"signature": "def default_output_mask(file_out, texture=True, vert_normals=True, vert_colors=False,face_colors=False, ml_version=ML_VERSION):", "body": "vn = ''wt = ''vc = ''fc = ''if ml_version < '':om = ''else:om = ''fext = os.path.splitext(file_out)[][:].strip().lower()if fext in ['', '', '']:om = ''texture = Falsevert_normals = Falsevert_colors = Falseface_colors = False", "docstring": "Set default output mask options based on file extension\nNote: v1.34BETA changed -om switch to -m\nPossible options (not all options are available for every format):\n vc -> vertex colors\n vf -> vertex flags\n vq -> vertex quality\n vn -> vertex normals\n vt -> vertex texture coords\n fc -> face colors\n ff -> face flags\n fq -> face quality\n fn -> face normals\n wc -> wedge colors\n wn -> wedge normals\n wt -> wedge texture coords", "id": "f9661:m3"} {"signature": "def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='', force=False, npm=None):", "body": "class NPM(BaseCommand):description = ''def run(self):if skip_npm:log.info('')returnnode_package = path or HEREnode_modules = pjoin(node_package, '')is_yarn = os.path.exists(pjoin(node_package, ''))npm_cmd = npmif npm is None:if is_yarn:npm_cmd = ['']else:npm_cmd = ['']if not which(npm_cmd[]):log.error(\"\"\"\".format(npm_cmd[]))returnif force or is_stale(node_modules, pjoin(node_package, '')):log.info('''')run(npm_cmd + [''], cwd=node_package)if build_dir and source_dir and not force:should_build = is_stale(build_dir, source_dir)else:should_build = Trueif should_build:run(npm_cmd + ['', build_cmd], cwd=node_package)return NPM", "docstring": "Return a Command for managing an npm installation.\n\n Note: The command is skipped if the `--skip-npm` flag is used.\n\n Parameters\n ----------\n path: str, optional\n The base path of the node package. Defaults to the repo root.\n build_dir: str, optional\n The target build directory. 
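A hedged usage sketch for the add_layer method above; the layer labels and the 'script' object are illustrative assumptions only.

    # Hypothetical sketch: 'script' is assumed to be a FilterScript-style object.
    script.add_layer('section_polyline')                     # appended to the stack and made current
    script.add_layer('reference_mesh', change_layer=False)   # appended, current layer left unchanged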
If this and source_dir are given,\n the JavaScript will only be built if necessary.\n source_dir: str, optional\n The source code directory.\n build_cmd: str, optional\n The npm command to build assets to the build_dir.\n npm: str or list, optional.\n The npm executable name, or a tuple of ['node', executable].", "id": "f9671:m12"} {"signature": "def get_version(file, name=''):", "body": "path = os.path.realpath(file)version_ns = {}with io.open(path, encoding=\"\") as f:exec(f.read(), {}, version_ns)return version_ns[name]", "docstring": "Get the version of the package from the given file by\n executing it and extracting the given `name`.", "id": "f9671:m0"} {"signature": "def combine_commands(*commands):", "body": "class CombinedCommand(Command):user_options = []def initialize_options(self):self.commands = []for C in commands:self.commands.append(C(self.distribution))for c in self.commands:c.initialize_options()def finalize_options(self):for c in self.commands:c.finalize_options()def run(self):for c in self.commands:c.run()return CombinedCommand", "docstring": "Return a Command that combines several commands.", "id": "f9671:m8"} {"signature": "def create_cmdclass(prerelease_cmd=None, package_data_spec=None,data_files_spec=None):", "body": "wrapped = [prerelease_cmd] if prerelease_cmd else []if package_data_spec or data_files_spec:wrapped.append('')wrapper = functools.partial(_wrap_command, wrapped)handle_files = _get_file_handler(package_data_spec, data_files_spec)if '' in sys.argv:egg = wrapper(bdist_egg, strict=True)else:egg = bdist_egg_disabledcmdclass = dict(build_py=wrapper(build_py, strict=is_repo),bdist_egg=egg,sdist=wrapper(sdist, strict=True),handle_files=handle_files,)if bdist_wheel:cmdclass[''] = wrapper(bdist_wheel, strict=True)cmdclass[''] = wrapper(develop, strict=True)return cmdclass", "docstring": "Create a command class with the given optional prerelease class.\n\n Parameters\n ----------\n prerelease_cmd: (name, Command) tuple, optional\n The command to run before releasing.\n package_data_spec: dict, optional\n A dictionary whose keys are the dotted package names and\n whose values are a list of glob patterns.\n data_files_spec: list, optional\n A list of (path, dname, pattern) tuples where the path is the\n `data_files` install path, dname is the source directory, and the\n pattern is a glob pattern.\n\n Notes\n -----\n We use specs so that we can find the files *after* the build\n command has run.\n\n The package data glob patterns should be relative paths from the package\n folder containing the __init__.py file, which is given as the package\n name.\n e.g. `dict(foo=['./bar/*', './baz/**'])`\n\n The data files directories should be absolute paths or relative paths\n from the root directory of the repository. Data files are specified\n differently from `package_data` because we need a separate path entry\n for each nested folder in `data_files`, and this makes it easier to\n parse.\n e.g.
`('share/foo/bar', 'pkgname/bizz, '*')`", "id": "f9671:m4"} {"signature": "def find_packages(top=HERE):", "body": "packages = []for d, dirs, _ in os.walk(top, followlinks=True):if os.path.exists(pjoin(d, '')):packages.append(os.path.relpath(d, top).replace(os.path.sep, ''))elif d != top:dirs[:] = []return packages", "docstring": "Find all of the packages.", "id": "f9671:m2"} {"signature": "def _join_translated(translated_parts, os_sep_class):", "body": "res = ''for part in translated_parts[:-]:if part == '':res += partelse:res += part + os_sep_classif translated_parts[-] == '':res += ''res += ''.format(os_sep_class=os_sep_class)else:res += translated_parts[-]return res", "docstring": "Join translated glob pattern parts.\n\n This is different from a simple join, as care need to be taken\n to allow ** to match ZERO or more directories.", "id": "f9671:m23"} {"signature": "def ensure_targets(targets):", "body": "class TargetsCheck(BaseCommand):def run(self):if skip_npm:log.info('')returnmissing = [t for t in targets if not os.path.exists(t)]if missing:raise ValueError(('' % missing))return TargetsCheck", "docstring": "Return a Command that checks that certain files exist.\n\n Raises a ValueError if any of the files are missing.\n\n Note: The check is skipped if the `--skip-npm` flag is used.", "id": "f9671:m13"} {"signature": "def _iexplode_path(path):", "body": "(head, tail) = os.path.split(path)if not head or (not tail and head == path):if head:yield headif tail or not head:yield tailreturnfor p in _iexplode_path(head):yield pyield tail", "docstring": "Iterate over all the parts of a path.\n\n Splits path recursively with os.path.split().", "id": "f9671:m21"} {"signature": "def compare_recursive_mtime(path, cutoff, newest=True):", "body": "if os.path.isfile(path):mt = mtime(path)if newest:if mt > cutoff:return Trueelif mt < cutoff:return Truefor dirname, _, filenames in os.walk(path, topdown=False):for filename in filenames:mt = mtime(pjoin(dirname, filename))if newest: if mt > cutoff:return Trueelif mt < cutoff:return Truereturn False", "docstring": "Compare the newest/oldest mtime for all files in a directory.\n\n Cutoff should be another mtime to be compared against. If an mtime that is\n newer/older than the cutoff is found it will return True.\n E.g. 
if newest=True, and a file in path is newer than the cutoff, it will\n return True.", "id": "f9671:m9"} {"signature": "def _get_files(file_patterns, top=HERE):", "body": "if not isinstance(file_patterns, (list, tuple)):file_patterns = [file_patterns]for i, p in enumerate(file_patterns):if os.path.isabs(p):file_patterns[i] = os.path.relpath(p, top)matchers = [_compile_pattern(p) for p in file_patterns]files = set()for root, dirnames, filenames in os.walk(top):if '' in dirnames:dirnames.remove('')for m in matchers:for filename in filenames:fn = os.path.relpath(pjoin(root, filename), top)if m(fn):files.add(fn.replace(os.sep, ''))return list(files)", "docstring": "Expand file patterns to a list of paths.\n\n Parameters\n -----------\n file_patterns: list or str\n A list of glob patterns for the data file locations.\n The globs can be recursive if they include a `**`.\n They should be relative paths from the top directory or\n absolute paths.\n top: str\n the directory to consider for data files\n\n Note:\n Files in `node_modules` are ignored.", "id": "f9671:m18"} {"signature": "def which(cmd, mode=os.F_OK | os.X_OK, path=None):", "body": "def _access_check(fn, mode):return (os.path.exists(fn) and os.access(fn, mode) andnot os.path.isdir(fn))if _access_check(cmd, mode):return cmdpath = (path or os.environ.get(\"\", os.defpath)).split(os.pathsep)if sys.platform == \"\":if os.curdir not in path:path.insert(, os.curdir)pathext = os.environ.get(\"\", \"\").split(os.pathsep)matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]else:files = [cmd]seen = set()for dir in path:dir = os.path.normcase(dir)if dir not in seen:seen.add(dir)for thefile in files:name = os.path.join(dir, thefile)if _access_check(name, mode):return namereturn None", "docstring": "Given a command, mode, and a PATH string, return the path which\n conforms to the given mode on the PATH, or None if there is no such\n file.\n `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result\n of os.environ.get(\"PATH\"), or can be overridden with a custom search\n path.", "id": "f9671:m14"} {"signature": "def _get_package_data(root, file_patterns=None):", "body": "if file_patterns is None:file_patterns = ['']return _get_files(file_patterns, pjoin(HERE, root))", "docstring": "Expand file patterns to a list of `package_data` paths.\n\n Parameters\n -----------\n root: str\n The relative path to the package root from `HERE`.\n file_patterns: list or str, optional\n A list of glob patterns for the data file locations.\n The globs can be recursive if they include a `**`.\n They should be relative paths from the root or\n absolute paths. 
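A brief usage sketch for the which helper above; the command name 'npm' is only an example and not taken from the source.

    # Hypothetical usage: check that an executable is available before shelling out.
    # which() returns the resolved path, or None if the command is not on PATH.
    npm_path = which('npm')
    if npm_path is None:
        print('npm was not found on PATH; skipping the JavaScript build')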
If not given, all files will be used.\n\n Note:\n Files in `node_modules` are ignored.", "id": "f9671:m19"} {"signature": "def _translate_glob(pat):", "body": "translated_parts = []for part in _iexplode_path(pat):translated_parts.append(_translate_glob_part(part))os_sep_class = '' % re.escape(SEPARATORS)res = _join_translated(translated_parts, os_sep_class)return ''.format(res=res)", "docstring": "Translate a glob PATTERN to a regular expression.", "id": "f9671:m22"} {"signature": "def img_from_vgg(x):", "body": "x = x.transpose((, , ))x[:, :, ] += x[:, :, ] += x[:, :, ] += x = x[:,:,::-] return x", "docstring": "Decondition an image from the VGG16 model.", "id": "f9677:m0"} {"signature": "def get(value):", "body": "if not isinstance(value, Token):raise TypeError('')if not hasattr(value, ''):raise TypeError('')if not value.identifier:value = value.__class__(**value.__dict__)value.identifier = ''ident = Identifier(value.identifier)return Query([Match(value),Return(ident)])", "docstring": "Query to get the value.", "id": "f9687:m1"} {"signature": "def range(self, index, *args):", "body": "self.data[''].append(''%(index,''.join(map(smart_str, args))))return self.parent", "docstring": "Set the range of each axis, one at a time\nargs are of the form ,,\nAPIPARAM: chxr", "id": "f9693:c0:m6"} {"signature": "def save(self, fname=None):", "body": "if not fname:fname = self.getname()assert fname != None, ''if not fname.endswith(''):fname += ''try:urlretrieve(self.url, fname)except Exception:raise IOError(''%fname)return fname", "docstring": "Download the chart from the URL into a filename as a PNG\n\nThe filename defaults to the chart title (chtt) if any", "id": "f9693:c1:m32"} {"signature": "def level_data(self, *args):", "body": "assert args[].lower() in '', ''%levelself[''] = ''%argsreturn self", "docstring": "Just used in QRCode for the moment\nargs are error_correction,margin_size\nAPIPARAM: chld", "id": "f9693:c1:m3"} {"signature": "def scale(self, *args):", "body": "self._scale = [''.join(map(smart_str, args))]return self", "docstring": "Scales the data down to the given size\nargs must be of the form::\n ,\n ,\n ,\n \nwill only work with text encoding!\nAPIPARAM: chds", "id": "f9693:c1:m7"} {"signature": "def label(self, index, *args):", "body": "self.data[''].append(str(''%(index, ''.join(map(str,args)) )).replace('',''))return self.parent", "docstring": "Label each axes one at a time\nargs are of the form