loads all rdf files in a directory args: directory: full path to the directory
def load_directory(self, directory, **kwargs):
    log.setLevel(kwargs.get("log_level", self.log_level))
    conn = self.__get_conn__(**kwargs)
    file_extensions = kwargs.get('file_extensions', conn.rdf_formats)
    file_list = list_files(directory,
                           file_extensions,
                           kwargs.get('include_subfolders', False),
                           include_root=True)
    for file in file_list:
        self.load_file(file[1], **kwargs)
    log.setLevel(self.log_level)
1,149,569
Decorate a function to be the helper function of commands. Arguments: commands: Names of commands that should trigger this function object. --------------------------- Interface of helper methods: @helper('some-command') def help_foo(self, args): ''' Arguments: args: A list of arguments. Returns: A string that is the help message. ''' pass
def helper(*commands):
    def decorated_func(f):
        f.__help_targets__ = list(commands)
        return f
    return decorated_func
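A minimal usage sketch (hypothetical shell class and command names; the surrounding framework is assumed to dispatch on __help_targets__):

# Hypothetical usage: the decorator only tags the method with metadata.
class MyShell(object):
    @helper('ls', 'dir')
    def help_ls(self, args):
        return 'List directory contents.'

print(MyShell.help_ls.__help_targets__)  # ['ls', 'dir']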
1,149,688
Process a string in batch mode. Arguments: content: A unicode string representing the content to be processed.
def batch_string(self, content):
    pipe_send, pipe_recv = multiprocessing.Pipe()
    self._pipe_end = pipe_recv
    proc = multiprocessing.Process(target=self.cmdloop)
    for line in content.split('\n'):
        pipe_send.send(line)
    pipe_send.close()
    proc.start()
    proc.join()
1,149,696
Driver level completer. Arguments: toks: A list of tokens, tokenized from the original input line. text: A string, the text to be replaced if a completion candidate is chosen. state: An integer, the index of the candidate out of the list of candidates. Returns: A string, the candidate.
def __driver_completer(self, toks, text, state):
    if state != 0:
        return self.__completion_candidates[state]

    # Update the cache when this method is first called, i.e., state == 0.

    # If the line is empty or the user is still inputting the first token,
    # complete with available commands.
    if not toks or (len(toks) == 1 and text == toks[0]):
        try:
            self.__completion_candidates = self.__complete_cmds(text)
        except Exception:
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.__completion_candidates = []
        return self.__completion_candidates[state]

    # Otherwise, try to complete with the registered completer method.
    cmd = toks[0]
    args = toks[1:] if len(toks) > 1 else None
    if text and args:
        del args[-1]
    if cmd in self._completer_map.keys():
        completer_name = self._completer_map[cmd]
        completer_method = getattr(self, completer_name)
        try:
            self.__completion_candidates = completer_method(cmd, args, text)
        except Exception:
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.__completion_candidates = []
    else:
        self.__completion_candidates = []
    return self.__completion_candidates[state]
1,149,701
Driver level helper method. 1. Display help message for the given input. Internally calls self.__get_help_message() to obtain the help message. 2. Re-display the prompt and the input line. Arguments: line: The input line. Raises: Errors from helper methods print stack trace without terminating this shell. Other exceptions will terminate this shell.
def __driver_helper(self, line):
    if line.strip() == '?':
        self.stdout.write('\n')
        self.stdout.write(self.doc_string())
    else:
        toks = shlex.split(line[:-1])
        msg = ''  # fallback so msg is defined even if the helper raised
        try:
            msg = self.__get_help_message(toks)
        except Exception:
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.stderr.flush()
        self.stdout.write('\n')
        self.stdout.write(msg)
    # Restore the prompt and the original input.
    self.stdout.write('\n')
    self.stdout.write(self.prompt)
    self.stdout.write(line)
    self.stdout.flush()
1,149,703
Find a review score from a given reviewer to a product. Args: reviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`. product: Product i.e. an instance of :class:`ria.bipartite.Product`. Returns: A review object representing the review from the reviewer to the product.
def review_score(self, reviewer, product):
    return self._g.retrieve_review(reviewer, product).score
1,149,718
Compute credibility of a given product. Args: product: An instance of :class:`bipartite.Product`. Returns: The credibility of the product. It is >= 0.5.
def __call__(self, product):
    reviewers = self.reviewers(product)
    Nq = len(reviewers)
    if Nq == 1:
        return 0.5
    else:
        # Computing the unbiased variance of scores.
        var = np.var([self.review_score(r, product) for r in reviewers],
                     ddof=1)
        return np.log(Nq) / (var + 1)
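A worked instance of the formula log(Nq) / (var + 1), standalone from the class (the reviewer/product wiring is assumed; scores here are illustrative):

import numpy as np

# Four review scores for one product; Nq > 1, so the formula applies.
scores = [0.2, 0.4, 0.4, 0.6]
Nq = len(scores)
var = np.var(scores, ddof=1)     # unbiased variance ~= 0.0267
print(np.log(Nq) / (var + 1))    # ~= 1.3503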
1,149,719
checks for changes in the vocabulary and mod times of the files to see if the cache should be used. Args: cache: the kwarg passed in to use the cache during __init__ Returns: Bool: True = use the cache files False = requery the triplestore
def __use_cache__(self, cache):
    # check for changes in the file mod times
    try:
        cache_mod = os.path.getmtime(self.cache_filepath)
    except FileNotFoundError:
        return False
    last_file_mod = sorted(
        self.conn.mgr.loaded_times.values())[-1].timestamp()
    if last_file_mod > cache_mod:
        return False
    curr_load = set(self.conn.mgr.loaded)
    # check to see if there is a change in the loaded files
    try:
        with open(self.loaded_filepath, "r") as fo:
            loaded_files = set(json.loads(fo.read()))
        if curr_load != loaded_files:
            return False
    except FileNotFoundError:
        return False
    # otherwise return the original cache init kwarg value
    return cache
1,149,725
Gets the definitions args: cache: True will read from the file cache, False queries the triplestore
def get_defs(self, cache=True):
    log.debug(" *** Started")
    cache = self.__use_cache__(cache)
    if cache:
        log.info(" loading json cache")
        try:
            with open(self.cache_filepath) as file_obj:
                self.results = json.loads(file_obj.read())
        except FileNotFoundError:
            self.results = []
    if not cache or len(self.results) == 0:
        log.info(" NO CACHE, querying the triplestore")
        sparql = render_without_request(self.def_sparql,
                                        graph=self.conn.graph,
                                        prefix=self.nsm.prefix())
        start = datetime.datetime.now()
        log.info(" Starting query")
        self.results = self.conn.query(sparql)
        log.info("query complete in: %s | %s triples retrieved.",
                 (datetime.datetime.now() - start),
                 len(self.results))
        with open(self.cache_filepath, "w") as file_obj:
            file_obj.write(json.dumps(self.results, indent=4))
        with open(self.loaded_filepath, "w") as file_obj:
            file_obj.write(json.dumps(self.conn.mgr.loaded))
1,149,726
Runs through the classes and ties the properties to the class args: class_list: a list of class names to run
def tie_properties(self, class_list):
    log.setLevel(self.log_level)
    start = datetime.datetime.now()
    log.info(" Tying properties to the class")
    for cls_name in class_list:
        cls_obj = getattr(MODULE.rdfclass, cls_name)
        prop_dict = dict(cls_obj.properties)
        for prop_name, prop_obj in cls_obj.properties.items():
            setattr(cls_obj, prop_name, link_property(prop_obj, cls_obj))
    log.info(" Finished tying properties in: %s",
             (datetime.datetime.now() - start))
1,149,733
Parse title from alternative location if not found where it should be. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title.
def _parse_alt_title(html_chunk):
    title = html_chunk.find("img", fn=has_param("alt"))
    if not title:
        raise UserWarning("Can't find alternative title source!")
    return title[0].params["alt"].strip()
1,149,770
Parse URL from alternative location if not found where it should be. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str: Book's URL.
def _parse_alt_url(html_chunk):
    url_list = html_chunk.find("a", fn=has_param("href"))
    url_list = map(lambda x: x.params["href"], url_list)
    url_list = filter(lambda x: not x.startswith("autori/"), url_list)
    if not url_list:
        return None
    return normalize_url(BASE_URL, url_list[0])
1,149,771
Parse title/name of the book and URL of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: tuple: (title, url), both as strings.
def _parse_title_url(html_chunk):
    url = None
    title_tags = html_chunk.match(
        ["div", {"class": "polozka_nazev"}],
        ["a", None, has_param("href")]
    )
    if not title_tags:
        return _parse_alt_title(html_chunk), _parse_alt_url(html_chunk)

    title = title_tags[0]
    url = normalize_url(BASE_URL, title.params["href"])
    title = title.getContent()

    if not title:
        title = _parse_alt_title(html_chunk)

    return title, url
1,149,772
Parse authors of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`structures.Author` objects. Blank if no author found.
def _parse_authors(html_chunk):
    authors_tags = html_chunk.match(
        ["div", {"class": "polozka_autor"}],
        "a"
    )
    authors = []
    for author_tag in authors_tags:
        # get name
        name = author_tag.getContent().strip()

        # skip tags without name
        if not name:
            continue

        # get url - if not found, set it to None
        url = author_tag.params.get("href", None)
        if url:
            url = normalize_url(BASE_URL, url)

        authors.append(Author(name, url))

    return authors
1,149,773
Parse description of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Description as string or None if not found.
def _parse_description(html_chunk):
    description_tag = html_chunk.match(
        ["div", {"class": "kniha_detail_text"}],
        "p"
    )
    if not description_tag:
        return None

    description = get_first_content(description_tag)
    description = description.replace("<br />", "\n")
    description = description.replace("<br/>", "\n")

    return dhtmlparser.removeTags(description).strip()
1,149,775
Parse available information about the book from the book details page. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: obj: :class:`structures.Publication` instance with book details.
def _process_book(html_chunk):
    title, book_url = _parse_title_url(html_chunk)

    # download page with details
    data = DOWNER.download(book_url)
    dom = dhtmlparser.parseString(
        handle_encodnig(data)
    )
    details = dom.find("div", {"id": "kniha_detail"})[0]

    # required parameters
    pub = Publication(
        title=title,
        authors=_parse_authors(html_chunk),
        price=_parse_price(details),
        publisher="CPress"
    )

    # optional parameters
    pub.optionals.URL = book_url
    pub.optionals.EAN = _parse_ean(details)
    pub.optionals.format = _parse_format(details)
    pub.optionals.pub_date = _parse_date(details)
    pub.optionals.description = _parse_description(details)

    return pub
1,149,776
Return a dictionary with numberified data Arguments: source -- source of data (filename or a list) start -- starting index of numbering
def numberify_data(self, source, start=1):
    if type(source) is str:
        try:
            fd = open(source, 'r+')
            data = fd.readlines()
            for index, item in enumerate(data):
                data[index] = item.strip('\n')
            fd.close()
        except IOError as e:
            print 'I/O error({0}): {1}'.format(e.errno, e.strerror)
            return False
    elif type(source) is list:
        data = source
    else:
        print 'Data error. Pass a filename or a list'
        return False
    return dict(list(enumerate(data, start)))
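A hedged usage sketch, assuming `obj` is an instance of the enclosing class; passing a list skips the file-reading branch entirely:

# Hypothetical usage: numbering an in-memory list instead of a file.
result = obj.numberify_data(['alpha', 'beta', 'gamma'], start=10)
# result == {10: 'alpha', 11: 'beta', 12: 'gamma'}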
1,149,787
Bulk adds rdf data to the class args: data: the data to be loaded kwargs: strip_orphans: True or False - remove triples that have an orphan blanknode as the object obj_method: "list", or None: if "list" the object of a method will be in the form of a list.
def load_data(self, data, **kwargs):
    self.__set_map__(**kwargs)
    start = datetime.datetime.now()
    log.debug("Dataload started")
    if isinstance(data, list):
        data = self._convert_results(data, **kwargs)
    class_types = self.__group_data__(data, **kwargs)

    # generate classes and add attributes to the data
    self._generate_classes(class_types, self.non_defined, **kwargs)

    # add triples to the dataset
    for triple in data:
        self.add_triple(sub=triple, **kwargs)

    log.debug("Dataload completed in '%s'",
              (datetime.datetime.now() - start))
1,149,959
processes the data into groups prior to loading into the dataset args: data: a list of triples
def __group_data__(self, data, **kwargs):
    # strip all of the rdf_type triples and merge
    class_types = self._merge_classtypes(self.__get_classtypes__(data))
    self.subj_list = list([item[self.smap] for item in class_types])
    # get non defined classes
    self.non_defined = self._get_non_defined(data, class_types)
    return class_types
1,149,960
creates the class for each class in the data set args: class_types: list of class_types in the dataset non_defined: list of subjects that have no defined class
def _generate_classes(self, class_types, non_defined, **kwargs):
    # kwargs['dataset'] = self
    for class_type in class_types:
        self[class_type[self.smap]] = self._get_rdfclass(
            class_type, **kwargs)(class_type, self, **kwargs)
        self.add_rmap_item(self[class_type[self.smap]],
                           class_type[self.pmap],
                           class_type[self.omap])
    for class_type in non_defined:
        self[class_type] = RdfClassBase(class_type, self, **kwargs)
        self.add_rmap_item(self[class_type], __a__, None)
    # NOTE: bare attribute access in the original; likely intended as a
    # call (self.__set_classes__()) or a property with side effects.
    self.__set_classes__
    try:
        self.base_class = self[self.base_uri]
    except KeyError:
        self.base_class = None
1,149,965
returns the instantiated class from the class list args: class_type: dictionary with rdf_types
def _get_rdfclass(self, class_type, **kwargs):

    def select_class(class_name):
        try:
            return getattr(MODULE.rdfclass, class_name.pyuri)
        except AttributeError:
            return RdfClassBase

    if kwargs.get("def_load"):
        return RdfClassBase

    if isinstance(class_type[self.omap], list):
        bases = [select_class(class_name)
                 for class_name in class_type[self.omap]]
        bases = [base for base in bases if base != RdfClassBase]
        if len(bases) == 0:
            return RdfClassBase
        elif len(bases) == 1:
            return bases[0]
        else:
            bases = remove_parents(bases)
            if len(bases) == 1:
                return bases[0]
            else:
                name = "_".join(sorted(class_type[self.omap]))
                # if the class has already been created, return it
                if hasattr(MODULE.rdfclass, name):
                    return getattr(MODULE.rdfclass, name)
                new_class = type(name, tuple(bases), {})
                new_class.hierarchy = list_hierarchy(
                    class_type[self.omap][0], bases)
                new_class.class_names = sorted(
                    [base.__name__
                     for base in bases
                     if base not in [RdfClassBase, dict]])
                setattr(MODULE.rdfclass, name, new_class)
                return new_class
    else:
        return select_class(class_type[self.omap])
1,149,966
returns a list of URIs and blanknodes that are not defined within the dataset. For example, if schema:Person has an associated rdf:type triple, it is considered defined. args: data: a list of triples class_types: list of subjects that are defined in the dataset
def _get_non_defined(self, data, class_types):
    subj_set = set([item[self.smap] for item in class_types])
    non_def_set = set([item[self.smap] for item in data])
    return list(non_def_set - subj_set)
1,149,968
converts the results of a query to RdfDatatype instances args: data: a list of triples
def _convert_results(self, data, **kwargs):
    if kwargs.get("multiprocessing", False):
        # NOTE: this branch is unfinished in the original source
        # ('pool_size' is undefined and leftover pdb.set_trace() calls
        # and commented-out drafts were removed here).
        m = mp.Manager()
        output = m.Queue()
        pool = mp.Pool(processes=pool_size)
        for i, row in enumerate(data):
            for key, val in row.items():
                try:
                    pool.apply(convert_row_main,
                               args=(val, i, key, output,))
                except Exception:
                    pass
        return output
    else:
        return [{key: pyrdf(value) for key, value in row.items()}
                for row in data]
1,149,969
A dict representation of this User instance. The return value uses the same key names as the JSON representation. Args: dt (bool): If True, return dates as python datetime objects. If False, return dates as ISO strings. Return: A dict representing this User instance
def AsDict(self, dt=True):
    data = {}
    if self.name:
        data['name'] = self.name
        data['mlkshk_url'] = self.mlkshk_url
    if self.profile_image_url:
        data['profile_image_url'] = self.profile_image_url
    if self.id:
        data['id'] = self.id
    if self.about:
        data['about'] = self.about
    if self.website:
        data['website'] = self.website
    if self.shakes:
        data['shakes'] = [shk.AsDict(dt=dt) for shk in self.shakes]
    data['shake_count'] = self.shake_count
    return data
1,150,049
Create a new User instance from a JSON dict. Args: data (dict): JSON dictionary representing a user. Returns: A User instance.
def NewFromJSON(data):
    if data.get('shakes', None):
        shakes = [Shake.NewFromJSON(shk) for shk in data.get('shakes')]
    else:
        shakes = None
    return User(
        id=data.get('id', None),
        name=data.get('name', None),
        profile_image_url=data.get('profile_image_url', None),
        about=data.get('about', None),
        website=data.get('website', None),
        shakes=shakes)
1,150,051
Compare two user objects against one another. Args: other (User): another User object against which to compare the current user.
def __eq__(self, other):
    try:
        return other and \
            self.id == other.id and \
            self.name == other.name and \
            self.profile_image_url == other.profile_image_url and \
            self.about == other.about and \
            self.website == other.website and \
            self.shakes == other.shakes
    except AttributeError:
        return False
1,150,052
A dict representation of this Comment instance. The return value uses the same key names as the JSON representation. Args: dt (bool): If True, return dates as python datetime objects. If False, return dates as ISO strings. Return: A dict representing this Comment instance
def AsDict(self, dt=True):
    data = {}
    if self.body:
        data['body'] = self.body
    if self.posted_at:
        data['posted_at'] = self.posted_at
    if self.user:
        data['user'] = self.user.AsDict()
    return data
1,150,053
Create a new Comment instance from a JSON dict. Args: data (dict): JSON dictionary representing a Comment. Returns: A Comment instance.
def NewFromJSON(data):
    return Comment(
        body=data.get('body', None),
        posted_at=data.get('posted_at', None),
        user=User.NewFromJSON(data.get('user', None))
    )
1,150,054
Create a new Shake instance from a JSON dict. Args: data (dict): JSON dictionary representing a Shake. Returns: A Shake instance.
def NewFromJSON(data):
    s = Shake(
        id=data.get('id', None),
        name=data.get('name', None),
        url=data.get('url', None),
        thumbnail_url=data.get('thumbnail_url', None),
        description=data.get('description', None),
        type=data.get('type', None),
        created_at=data.get('created_at', None),
        updated_at=data.get('updated_at', None)
    )
    if data.get('owner', None):
        s.owner = User.NewFromJSON(data.get('owner', None))
    return s
1,150,058
A dict representation of this SharedFile instance. The return value uses the same key names as the JSON representation. Args: dt (bool): If True, return dates as python datetime objects. If False, return dates as ISO strings. Return: A dict representing this SharedFile instance
def AsDict(self, dt=True):
    data = {}
    if self.sharekey:
        data['sharekey'] = self.sharekey
    if self.name:
        data['name'] = self.name
    if self.user:
        data['user'] = self.user.AsDict()
    if self.title:
        data['title'] = self.title
    if self.description:
        data['description'] = self.description
    if self.posted_at:
        if dt:
            data['posted_at'] = self.posted_at
        else:
            data['posted_at'] = self.posted_at_iso
    if self.permalink:
        data['permalink'] = self.permalink
    if self.width:
        data['width'] = self.width
    if self.height:
        data['height'] = self.height
    if self.image_url:
        data['image_url'] = self.image_url
    if self.source_url:
        data['source_url'] = self.source_url
    data['views'] = self.views
    data['likes'] = self.likes
    data['saves'] = self.saves
    data['comments'] = self.comments
    data['nsfw'] = self.nsfw
    data['saved'] = self.saved
    data['liked'] = self.liked
    return data
1,150,061
Create a new SharedFile instance from a JSON dict. Args: data (dict): JSON dictionary representing a SharedFile. Returns: A SharedFile instance.
def NewFromJSON(data):
    return SharedFile(
        sharekey=data.get('sharekey', None),
        name=data.get('name', None),
        user=User.NewFromJSON(data.get('user', None)),
        title=data.get('title', None),
        description=data.get('description', None),
        posted_at=data.get('posted_at', None),
        permalink=data.get('permalink', None),
        width=data.get('width', None),
        height=data.get('height', None),
        views=data.get('views', 0),
        likes=data.get('likes', 0),
        saves=data.get('saves', 0),
        comments=data.get('comments', None),
        nsfw=data.get('nsfw', False),
        image_url=data.get('image_url', None),
        source_url=data.get('source_url', None),
        saved=data.get('saved', False),
        liked=data.get('liked', False),
    )
1,150,062
Parse Arguments Used to parse the arguments passed to the script Args: args (list): A list of strings representing arguments to a script Returns: dict: Returns a dictionary with args as keys and the values sent with them or True for valueless arguments Raises: ValueError: If args is not a list or tuple
def parseArgs(args):
    # If args is not a list
    if not isinstance(args, (list, tuple)):
        raise ValueError('args is not a list or tuple')

    # Init the return value
    dRet = {}

    # Go through each argument
    for s in args:
        # Check the string matches the format
        oRes = re.match(u'^--([^=]+)(?:=(.+))?$', s)

        # If we have a match
        if oRes:
            # Store it by name and value
            mGroup2 = oRes.group(2)
            dRet[oRes.group(1)] = (not mGroup2 and True or mGroup2)

        # Else add it to the unknowns
        else:
            try:
                dRet['?'].append(s)
            except KeyError:
                dRet['?'] = [s]

    # Return the dict
    return dRet
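For example, a valued flag, a bare flag, and an unknown token land in three different places:

# Example call; the '?' key collects tokens that match no flag pattern.
print(parseArgs(['--host=localhost', '--verbose', 'extra.txt']))
# {'host': 'localhost', 'verbose': True, '?': ['extra.txt']}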
1,150,238
Return 'You' if value is equal to arg. Parameters: value should be a userprofile arg should be another user. Ideally, value should be a userprofile from an object and arg the user logged in.
def display_user(value, arg):
    if value.user == arg and arg.username != ANONYMOUS_USERNAME:
        return "You"
    else:
        return value.user.get_full_name()
1,150,568
SCFilter(clslist) Args: clslist (list): List of classes from which to build the filter Returns: new SCFilter instance
def __init__(self, clslist):
    if not hasattr(clslist, '__contains__'):
        clslist = [clslist]
    self.required = reduce(
        set.union,
        (cls.required for cls in clslist
         if issubclass(cls, AttributeMapper)))
    self.optional = reduce(
        set.union,
        (cls.optional for cls in clslist
         if issubclass(cls, AttributeMapper)))
    self.optional.symmetric_difference_update(self.required)
1,150,861
Applies a function to the contained :meth:`Result.Ok` value. Args: op: The function to apply to the :meth:`Result.Ok` value. Returns: A :class:`Result` with its success value as the function result if `self` is an :meth:`Result.Ok` value, otherwise returns `self`. Examples: >>> Ok(1).map(lambda x: x * 2) Ok(2) >>> Err(1).map(lambda x: x * 2) Err(1)
def map(self, op: Callable[[T], U]) -> 'Union[Result[U, E], Result[T, E]]':
    return self._type.Ok(op(cast(T, self._val))) if self._is_ok else self
1,151,414
Applies a function to the contained :meth:`Result.Err` value. Args: op: The function to apply to the :meth:`Result.Err` value. Returns: A :class:`Result` with its error value as the function result if `self` is a :meth:`Result.Err` value, otherwise returns `self`. Examples: >>> Ok(1).map_err(lambda x: x * 2) Ok(1) >>> Err(1).map_err(lambda x: x * 2) Err(2)
def map_err(self, op: Callable[[E], F]) -> 'Union[Result[T, F], Result[T, E]]':
    return self if self._is_ok else cast(
        'Result[T, F]', self._type.Err(op(cast(E, self._val)))
    )
1,151,416
Returns the success value in the :class:`Result` or computes a default from the error value. Args: op: The function to compute the default with. Returns: The success value in the :class:`Result` if it is a :meth:`Result.Ok` value, otherwise ``op(E)``. Examples: >>> Ok(1).unwrap_or_else(lambda e: e * 10) 1 >>> Err(1).unwrap_or_else(lambda e: e * 10) 10
def unwrap_or_else(self, op: Callable[[E], U]) -> Union[T, U]:
    return cast(T, self._val) if self._is_ok else op(cast(E, self._val))
1,151,419
Initialize class and spawn self as Base Class w/o nargs Args: option_strings (list): list of str giving command line flags that call this action dest (str): namespace reference to value copyright_text (str): str to print nargs (str): number of args as special char or int **kwargs (various): optional arguments to pass to super call
def __init__(self, option_strings, dest, copyright_text=None, nargs=None,
             **kwargs):
    # Only accept a single value to analyze
    if nargs is not None:
        raise ValueError('nargs not allowed for CopyRight')
    self.copyright = copyright_text
    # Call self again but without nargs
    super(CopyRight, self).__init__(option_strings, dest, nargs=0, **kwargs)
1,152,038
Add a channel to subscribe to and a callback function to run when the channel receives an update. If the channel already exists, create a new "subscription" and append another callback function. Args: channel (str): The channel to add a subscription to. callback_function (func): The function to run on an update to the passed in channel.
def add_subscription(self, channel, callback_function):
    if channel not in CHANNELS:
        CHANNELS.append(channel)
        SUBSCRIPTIONS[channel] = [callback_function]
    else:
        SUBSCRIPTIONS[channel].append(callback_function)

    # If a channel gets added after subscription has already been called
    # call subscribe on the individual channel, here.
    if self._subscribed:
        _LOGGER.info("New channel added after main subscribe call.")
        self._pubnub.subscribe().channels(channel).execute()
1,152,104
Runs a sparql query and returns the results Args: ----- sparql: the sparql query to run namespace: the namespace to run the sparql query against mode: ['get'(default), 'update'] the type of sparql query rtn_format: ['json'(default), 'xml'] format of query results Kwargs: ------- debug(bool): If True sets logging level to debug
def query(self,
          sparql,
          mode="get",
          namespace=None,
          rtn_format="json",
          **kwargs):
    namespace = pick(namespace, self.namespace)
    if kwargs.get("log_level"):
        log.setLevel(kwargs['log_level'])
    if kwargs.get("debug"):
        log.setLevel(logging.DEBUG)
    if rtn_format not in self.qry_formats:
        raise KeyError("rtn_format was '%s'. Allowed values are %s" %
                       (rtn_format, self.qry_formats))
    url = self._make_url(namespace)
    if 'prefix' not in sparql.lower():
        sparql = "%s\n%s" % (NSM.prefix(), sparql)
    if mode == "get":
        data = {"query": sparql}  # , "format": rtn_format}
    elif mode == "update":
        data = {"update": sparql}
    else:
        raise NotImplementedError("'mode' != to ['get', 'update']")
    headers = {'Accept': self.qry_formats[rtn_format]}
    start = datetime.datetime.now()
    try:
        result = requests.post(url, data=data, headers=headers)
    except requests.exceptions.ConnectionError:
        result = requests.post(self._make_url(namespace, self.local_url),
                               data=data,
                               headers=headers)
    # NOTE: a stray comma in the original format list (a syntax error)
    # was removed here, and 'SPAQRL' was corrected to 'SPARQL'.
    log.debug(format_multiline(["",
                                "url='{url}'",
                                "**** SPARQL QUERY ****",
                                "",
                                "{sparql}",
                                "Query Time: {q_time}"],
                               url=url,
                               mode=mode,
                               namespace=namespace,
                               rtn_format=rtn_format,
                               sparql=sparql,
                               q_time=(datetime.datetime.now() - start),
                               **kwargs))
    if result.status_code == 200:
        try:
            if rtn_format == "json":
                bindings = result.json().get('results', {}).get('bindings', [])
            elif rtn_format == 'xml':
                xml_doc = etree.XML(result.text)
                bindings = xml_doc.findall("results/bindings")
            else:
                bindings = result.text
            try:
                log.debug("result count: %s", len(bindings))
            except TypeError:
                pass
            return bindings
        except json.decoder.JSONDecodeError:
            if mode == 'update':
                return BeautifulSoup(result.text, 'lxml').get_text()
            return result.text
    else:
        raise SyntaxError("%s\n\n%s\n\n%s" %
                          (sparql,
                           add_sparql_line_nums(sparql),
                           result.text[result.text.find("java."):]))
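A hedged usage sketch, assuming `conn` is an instance of this connector and the endpoint returns standard SPARQL 1.1 JSON bindings:

# Hypothetical call; each row follows the SPARQL JSON results shape.
rows = conn.query("SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 5")
for row in rows:
    print(row['s']['value'], row['p']['value'], row['o']['value'])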
1,152,782
tests to see if the namespace exists args: namespace: the name of the namespace
def has_namespace(self, namespace):
    result = requests.get(self._make_url(namespace))
    if result.status_code == 200:
        return True
    elif result.status_code == 404:
        return False
1,152,786
Deletes a namespace from the triplestore args: namespace: the name of the namespace
def delete_namespace(self, namespace):
    # if not self.has_namespace(namespace):
    #     return "Namespace does not exists"
    # log = logging.getLogger("%s.%s" % (self.log_name,
    #                                    inspect.stack()[0][3]))
    # log.setLevel(self.log_level)
    url = self._make_url(namespace).replace("/sparql", "")
    result = requests.delete(url=url)
    if result.status_code == 200:
        log.critical(result.text)
        return result.text
    raise RuntimeError(result.text)
1,152,788
Creates the REST URL based on the supplied namespace args: namespace: string of the namespace kwargs: check_status_call: True/False, whether the function is called from check_status. Used to avoid a recursion error
def _make_url(self, namespace=None, url=None, **kwargs):
    if not kwargs.get("check_status_call"):
        if not self.url:
            # NOTE: bare attribute access in the original; check_status
            # appears to be a property with side effects that sets self.url.
            self.check_status
    rtn_url = self.url
    if url:
        rtn_url = url
    if rtn_url is None:
        rtn_url = self.ext_url
    namespace = pick(namespace, self.namespace)
    if namespace:
        rtn_url = os.path.join(rtn_url.replace("sparql", ""),
                               "namespace",
                               namespace,
                               "sparql").replace("\\", "/")
    elif not rtn_url.endswith("sparql"):
        rtn_url = os.path.join(rtn_url, "sparql").replace("\\", "/")
    return rtn_url
1,152,789
Will delete and recreate specified namespace args: namespace(str): Namespace to reset params(dict): params used to reset the namespace
def reset_namespace(self, namespace=None, params=None):
    log = logging.getLogger("%s.%s" % (self.log_name,
                                       inspect.stack()[0][3]))
    log.setLevel(self.log_level)
    namespace = pick(namespace, self.namespace)
    params = pick(params, self.namespace_params)
    log.warning(" Resetting namespace '%s' at host: %s",
                namespace, self.url)
    try:
        self.delete_namespace(namespace)
    except RuntimeError:
        pass
    self.create_namespace(namespace, params)
1,152,790
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason.
def execute(self, correlation_id, args):
    return self._intercepter.execute(_next, correlation_id, args)
1,152,887
Carries out the action of solving for wavefields. Args: rhs (sparse matrix): Right-hand side vector(s) Returns: np.ndarray: Wavefields
def __mul__(self, rhs):
    if isinstance(rhs, scipy.sparse.spmatrix):
        def qIter(qs):
            for j in range(qs.shape[1]):
                qi = qs.getcol(j).toarray().ravel()
                yield qi
            return
    else:
        def qIter(qs):
            for j in range(qs.shape[1]):
                qi = qs[:, j]
                yield qi
            return

    result = np.empty(rhs.shape, dtype=np.complex128)
    for i, q in enumerate(qIter(rhs)):
        result[:, i] = self._solve(q)

    return result
1,152,917
Get the SHA of a commit's tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of a commit. Returns: The SHA of the commit's tree.
def get_commit_tree(profile, sha):
    data = commits.get_commit(profile, sha)
    tree = data.get("tree")
    sha = tree.get("sha")
    return sha
1,152,949
Get the files (blobs) in a tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of a tree. Returns: A list of dicts containing info about each blob in the tree.
def get_files_in_tree(profile, sha):
    data = trees.get_tree(profile, sha)
    tree = data.get("tree")
    blobs = [x for x in tree if x.get("type") == "blob"]
    return blobs
1,152,950
Remove a file from a tree. Args: tree A list of dicts containing info about each blob in a tree. file_path The path of a file to remove from a tree. Returns: The provided tree, but with the item matching the specified file_path removed.
def remove_file_from_tree(tree, file_path):
    match = None
    for item in tree:
        if item.get("path") == file_path:
            match = item
            break
    if match:
        tree.remove(match)
    return tree
1,152,951
Add a file to a tree. Args: tree A list of dicts containing info about each blob in a tree. file_path The path of the new file in the tree. file_contents The (UTF-8 encoded) contents of the new file. is_executable If ``True``, the new file will get executable permissions (0755). Otherwise, it will get 0644 permissions. Returns: The provided tree, but with the new file added.
def add_file_to_tree(tree, file_path, file_contents, is_executable=False):
    record = {
        "path": file_path,
        "mode": "100755" if is_executable else "100644",
        "type": "blob",
        "content": file_contents,
    }
    tree.append(record)
    return tree
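Since both helpers return the (mutated) tree, they chain naturally; a small sketch with a hypothetical path:

# Build a tree locally, then undo the addition (path is illustrative).
tree = []
tree = add_file_to_tree(tree, "bin/run.sh", "#!/bin/sh\necho hi\n",
                        is_executable=True)
tree = remove_file_from_tree(tree, "bin/run.sh")
assert tree == []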
1,152,952
Get all files in a branch's tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. branch_sha The SHA a branch's HEAD points to. Returns: A list of dicts containing info about each blob in the tree.
def get_files_in_branch(profile, branch_sha):
    tree_sha = get_commit_tree(profile, branch_sha)
    files = get_files_in_tree(profile, tree_sha)
    tree = [prepare(x) for x in files]
    return tree
1,152,953
Get a file from a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. branch The name of a branch. file_path The path of the file to fetch. Returns: The (UTF-8 encoded) content of the file, as a string.
def get_file(profile, branch, file_path):
    branch_sha = get_branch_sha(profile, branch)
    tree = get_files_in_branch(profile, branch_sha)
    match = None
    for item in tree:
        if item.get("path") == file_path:
            match = item
            break
    file_sha = match.get("sha")
    blob = blobs.get_blob(profile, file_sha)
    content = blob.get("content")
    decoded_content = b64decode(content)
    return decoded_content.decode("utf-8")
1,152,956
Serialize namedtuples (and other basic python types) to dictionary with some special properties. Args: data (namedtuple/other python types): Data which will be serialized to dict. Data can be later automatically de-serialized by calling _deserializeNT().
def _serializeNT(data):
    if isinstance(data, list):
        return [_serializeNT(item) for item in data]

    elif isinstance(data, tuple) and hasattr(data, "_fields"):  # namedtuple
        serialized = _serializeNT(dict(data._asdict()))
        serialized["__nt_name"] = data.__class__.__name__
        return serialized

    elif isinstance(data, tuple):
        return tuple(_serializeNT(item) for item in data)

    elif isinstance(data, dict):
        return {
            key: _serializeNT(data[key])
            for key in data
        }

    return data
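For example, a namedtuple serializes to a dict tagged with its class name, which is what lets _deserializeNT() rebuild it later:

from collections import namedtuple

# A namedtuple round-trips through _serializeNT as a tagged dict.
Point = namedtuple("Point", ["x", "y"])
print(_serializeNT(Point(1, 2)))
# {'x': 1, 'y': 2, '__nt_name': 'Point'}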
1,153,011
Compose absolute path for given `pub`. Args: pub (obj): :class:`.DBPublication` instance. uuid_url (bool, default False): Compose URL using UUID. Returns: str: Absolute url-path of the publication, without server's address \ and protocol. Raises: PrivatePublicationError: When the `pub` is private publication.
def compose_path(pub, uuid_url=False):
    if uuid_url:
        return join(
            "/",
            UUID_DOWNLOAD_KEY,
            str(pub.uuid)
        )
    return join(
        "/",
        DOWNLOAD_KEY,
        basename(pub.file_pointer),
        basename(pub.filename)
    )
1,153,176
Compose absolute path for given `tree`. Args: tree (obj): :class:`.Tree` instance. issn (bool, default False): Compose URL using ISSN. Returns: str: Absolute path of the tree, without server's address and protocol.
def compose_tree_path(tree, issn=False):
    if issn:
        return join(
            "/",
            ISSN_DOWNLOAD_KEY,
            basename(tree.issn)
        )
    return join(
        "/",
        PATH_DOWNLOAD_KEY,
        quote_plus(tree.path).replace("%2F", "/"),
    )
1,153,177
Compose full url for given `pub`, with protocol, server's address and port. Args: pub (obj): :class:`.DBPublication` instance. uuid_url (bool, default False): Compose URL using UUID. Returns: str: Absolute url of the publication. Raises: PrivatePublicationError: When the `pub` is private publication.
def compose_full_url(pub, uuid_url=False):
    url = compose_path(pub, uuid_url)
    if WEB_PORT == 80:
        return "%s://%s%s" % (_PROTOCOL, WEB_ADDR, url)
    return "%s://%s:%d%s" % (_PROTOCOL, WEB_ADDR, WEB_PORT, url)
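The port-80 special case can be seen with the same formatting expressions (values below are illustrative, not the module's real constants):

print("%s://%s%s" % ("http", "example.com", "/download/f/book.pdf"))
# http://example.com/download/f/book.pdf
print("%s://%s:%d%s" % ("http", "example.com", 8080, "/download/f/book.pdf"))
# http://example.com:8080/download/f/book.pdf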
1,153,178
Compose full url for given `tree`, with protocol, server's address and port. Args: tree (obj): :class:`.Tree` instance. issn_url (bool, default False): Compose URL using ISSN. Returns: str: URL of the tree
def compose_tree_url(tree, issn_url=False): url = compose_tree_path(tree, issn_url) if WEB_PORT == 80: return "%s://%s%s" % (_PROTOCOL, WEB_ADDR, url) return "%s://%s:%d%s" % (_PROTOCOL, WEB_ADDR, WEB_PORT, url)
1,153,179
Gets all the messages from a specified file. This will find and resolve dependencies, failing if the descriptor pool cannot satisfy them. Args: files: The file names to extract messages from. Returns: A dictionary mapping proto names to the message classes. This will include any dependent messages as well as any messages defined in the same file as a specified message.
def GetMessages(self, files):
    result = {}
    for file_name in files:
        file_desc = self.pool.FindFileByName(file_name)
        for name, msg in file_desc.message_types_by_name.items():
            if file_desc.package:
                full_name = '.'.join([file_desc.package, name])
            else:
                full_name = msg.name
            result[full_name] = self.GetPrototype(
                self.pool.FindMessageTypeByName(full_name))

        # While the extension FieldDescriptors are created by the
        # descriptor pool, the python classes created in the factory need
        # them to be registered explicitly, which is done below.
        #
        # The call to RegisterExtension will specifically check if the
        # extension was already registered on the object and either
        # ignore the registration if the original was the same, or raise
        # an error if they were different.
        for name, extension in file_desc.extensions_by_name.items():
            if extension.containing_type.full_name not in self._classes:
                self.GetPrototype(extension.containing_type)
            extended_class = self._classes[
                extension.containing_type.full_name]
            extended_class.RegisterExtension(extension)
    return result
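A hedged usage sketch, assuming `factory` is a message factory whose descriptor pool already contains a file 'example.proto' with package `example` and a message `Foo` (all names here are hypothetical):

messages = factory.GetMessages(['example.proto'])
FooClass = messages['example.Foo']
msg = FooClass()  # instantiate the dynamically generated message class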
1,153,319
Summary: custom format logger Args: mode (str): The Logger module supports the following log modes: - log to console / stdout. Log_mode = 'stream' - log to file - log to system logger (syslog) Returns: logger object | TYPE: logging
def getLogger(*args, **kwargs):
    log_mode = local_config['LOGGING']['LOG_MODE']

    # log format - file
    file_format = '%(asctime)s - %(pathname)s - %(name)s - [%(levelname)s]: %(message)s'

    # log format - stream
    stream_format = '%(pathname)s - %(name)s - [%(levelname)s]: %(message)s'

    # log format - syslog
    syslog_format = '- %(pathname)s - %(name)s - [%(levelname)s]: %(message)s'

    # set facility for syslog:
    if local_config['LOGGING']['SYSLOG_FILE']:
        syslog_facility = 'local7'
    else:
        syslog_facility = 'user'

    # all formats
    asctime_format = "%Y-%m-%d %H:%M:%S"

    # objects
    logger = logging.getLogger(*args, **kwargs)
    logger.propagate = False

    try:
        if not logger.handlers:
            # branch on output format, default to stream
            if mode_assignment(log_mode) == 'FILE':
                # file handler
                f_handler = logging.FileHandler(
                    local_config['LOGGING']['LOG_PATH'])
                f_formatter = logging.Formatter(file_format, asctime_format)
                f_handler.setFormatter(f_formatter)
                logger.addHandler(f_handler)
                logger.setLevel(logging.DEBUG)

            elif mode_assignment(log_mode) == 'STREAM':
                # stream handlers
                s_handler = logging.StreamHandler()
                s_formatter = logging.Formatter(stream_format)
                s_handler.setFormatter(s_formatter)
                logger.addHandler(s_handler)
                logger.setLevel(logging.DEBUG)

            elif mode_assignment(log_mode) == 'SYSLOG':
                sys_handler = logging.handlers.SysLogHandler(
                    address='/dev/log', facility=syslog_facility)
                sys_formatter = logging.Formatter(syslog_format)
                sys_handler.setFormatter(sys_formatter)
                logger.addHandler(sys_handler)
                logger.setLevel(logging.DEBUG)

            else:
                syslog.warning(
                    '%s: [WARNING]: log_mode value of (%s) unrecognized - not supported' %
                    (inspect.stack()[0][3], str(log_mode)))
                ex = Exception(
                    '%s: Unsupported mode indicated by log_mode value: %s' %
                    (inspect.stack()[0][3], str(log_mode)))
                raise ex
    except OSError as e:
        raise e
    return logger
1,153,365
Return the HTML representation of an object. Args: obj: The object to represent cfg: Configuration to add to the current configuration for this operation. Returns: The representation of the object.
def __call__(self, obj, **cfg):
    depth = self.config.depth or 0
    seen_on_path = self.config.seen_on_path or frozenset()
    cfg.setdefault('depth', depth + 1)
    cfg['seen_on_path'] = seen_on_path | {id(obj)}
    h = self.with_config(cfg)
    max_depth = h.config.max_depth
    if h.config.preprocess:
        obj = h.config.preprocess(obj, hrepr)
    if id(obj) in seen_on_path:
        # This object is a child of itself, so we display a neat
        # little loop to avoid busting the stack.
        result = self.H.span['hrepr-circular']('⥁')
    elif max_depth is not None and depth >= max_depth:
        result = h._hrepr(obj, self.type_handlers_short,
                          ['__hrepr_short__'], self.stdrepr_short)
    else:
        result = h._hrepr(obj, self.type_handlers,
                          ['__hrepr__', '__hrepr_short__'], self.stdrepr)
    if h.config.postprocess:
        return h.config.postprocess(obj, result, h.H, h) or result
    else:
        return result
1,153,587
Store the resources returned by ``source()``. If ``source`` has been acquired before, it will not be called a second time. Args: source (callable): A function that returns a resource or a list of resources. Returns: None
def acquire_resources(self, source):
    if source not in self.consulted:
        self.consulted.add(source)
        if isinstance(source, Tag):
            res = source
        else:
            res = source(self.H)
        if res is None:
            res = set()
        elif isinstance(res, (list, tuple)):
            res = set(res)
        elif isinstance(res, Tag):
            res = {res}
        self.resources |= res
1,153,591
sets the processed data to the appropriate property attribute Args: ----- prop: the property being manipulated data: the list of processed data
def __set_data__(self, prop, data):
    if self.data_attr:
        setattr(prop, self.data_attr, data)
    else:
        rm_idxs = []
        for i, val in enumerate(prop):
            if val not in data:
                rm_idxs.append(i)
        for idx in sorted(rm_idxs, reverse=True):
            prop.pop(idx)
        for val in data:
            if val not in prop:
                prop.append(val)
1,153,690
Method takes a binding, extracts the value, and returns an rdflib entity Args: binding: binding row
def __get_object__(binding):
    if isinstance(binding, rdflib.term.Node):
        return binding
    elif isinstance(binding, collections.Iterable):
        for key, row in binding.items():
            if isinstance(row, (rdflib.URIRef, rdflib.Literal)):
                return row
            elif isinstance(row, dict):
                if row.get('type').startswith('uri'):
                    return rdflib.URIRef(row.get('value'))
                return rdflib.Literal(row.get('value'))
            elif isinstance(row, tuple):
                print(row)
            elif isinstance(row, str):
                if row.startswith("literal") or "xml:lang" in key:
                    continue
                return rdflib.Literal(row)
1,153,939
Internal method takes a datatype (can be None) and returns the RDF Object Term Args: ----- datatype: None, or rdflib.URIRef value: Varies depending on ingester
def __generate_object_term__(self, datatype, value):
    if datatype == NS_MGR.xsd.anyURI.rdflib:
        term = rdflib.URIRef(value)
    elif datatype:
        term = rdflib.Literal(value, datatype=datatype)
    else:
        term = rdflib.Literal(value)
    return term
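The three branches map directly onto rdflib term constructors; an illustrative sketch (NS_MGR.xsd.anyURI.rdflib is assumed to be the URIRef for xsd:anyURI):

import rdflib

print(repr(rdflib.URIRef("http://example.org/item/1")))        # anyURI branch
print(repr(rdflib.Literal("2021-01-01", datatype=rdflib.XSD.date)))  # typed
print(repr(rdflib.Literal("plain text")))                      # untyped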
1,153,943
Creates a SimpleNamespace for the TripleMap's logicalSource Args: ----- map_iri: URIRef
def __logical_source__(self, map_iri):
    logical_source = SimpleNamespace()
    logical_src_bnode = self.rml.value(
        subject=map_iri,
        predicate=NS_MGR.rml.logicalSource.rdflib)
    if logical_src_bnode is None:
        return
    logical_source.source = self.rml.value(
        subject=logical_src_bnode,
        predicate=NS_MGR.rml.source.rdflib)
    logical_source.reference_formulations = [
        r for r in self.rml.objects(
            subject=logical_src_bnode,
            predicate=NS_MGR.rml.referenceFormulation.rdflib)]
    logical_source.iterator = self.rml.value(
        subject=logical_src_bnode,
        predicate=NS_MGR.rml.iterator.rdflib)
    query = self.rml.value(
        subject=logical_src_bnode,
        predicate=NS_MGR.rml.query.rdflib)
    json_query = self.rml.value(
        subject=logical_src_bnode,
        predicate=NS_MGR.rml.reference.rdflib)
    json_key = self.rml.value(
        subject=logical_src_bnode,
        predicate=NS_MGR.rml.key.rdflib)
    if query is not None:
        logical_source.query = query
    if json_query is not None:
        self.use_json_qry = True
        self.default_use_json_qry = True
        logical_source.json_query = json_query
        logical_source.json_key = json_key
    return logical_source
1,153,945
Creates a SimpleNamespace for the TripleMap's subjectMap and populates properties from the RML RDF graph Args: ----- map_iri: rdflib.URIRef,TripleMap IRI Returns: -------- SimpleNamespace
def __subject_map__(self, map_iri):
    subject_map = SimpleNamespace()
    subject_map_bnode = self.rml.value(
        subject=map_iri,
        predicate=NS_MGR.rr.subjectMap.rdflib)
    if subject_map_bnode is None:
        return
    #! Should look at supporting multiple rr:class definitions
    subject_map.class_ = self.rml.value(
        subject=subject_map_bnode,
        predicate=getattr(NS_MGR.rr, "class").rdflib)
    subject_map.template = self.rml.value(
        subject=subject_map_bnode,
        predicate=NS_MGR.rr.template.rdflib)
    subject_map.termType = self.rml.value(
        subject=subject_map_bnode,
        predicate=NS_MGR.rr.termType.rdflib)
    subject_map.deduplicate = self.rml.value(
        subject=subject_map_bnode,
        predicate=NS_MGR.kds.deduplicate.rdflib)
    subject_map.reference = self.rml.value(
        subject=subject_map_bnode,
        predicate=NS_MGR.rr.reference.rdflib)
    return subject_map
1,153,946
Iterates through rr:predicateObjectMaps for this TripleMap creating a SimpleNamespace for each triple map and assigning the constant, template, parentTripleMap, reference as properties. Args: ----- map_iri: rdflib.URIRef, TripleMap IRI Returns: -------- list: List of predicate_object Namespace objects
def __predicate_object_map__(self, map_iri):
    pred_obj_maps = []
    for pred_obj_map_bnode in self.rml.objects(
            subject=map_iri,
            predicate=NS_MGR.rr.predicateObjectMap.rdflib):
        pred_obj_map = SimpleNamespace()
        pred_obj_map.predicate = self.rml.value(
            subject=pred_obj_map_bnode,
            predicate=NS_MGR.rr.predicate.rdflib)
        obj_map_bnode = self.rml.value(
            subject=pred_obj_map_bnode,
            predicate=NS_MGR.rr.objectMap.rdflib)
        if obj_map_bnode is None:
            continue
        pred_obj_map.constant = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.constant.rdflib)
        pred_obj_map.template = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.template.rdflib)
        pred_obj_map.parentTriplesMap = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.parentTriplesMap.rdflib)
        if pred_obj_map.parentTriplesMap is not None:
            self.parents.add(str(pred_obj_map.parentTriplesMap))
        pred_obj_map.reference = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.reference.rdflib)
        pred_obj_map.datatype = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.datatype.rdflib)
        pred_obj_map.query = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rml.query.rdflib)
        pred_obj_map.json_query = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rml.reference.rdflib)
        json_key = None
        if hasattr(self.triple_maps[str(map_iri)].logicalSource,
                   'json_key'):
            json_key = self.triple_maps[str(map_iri)].logicalSource.json_key
        pred_obj_map.json_key = pick(
            self.rml.value(subject=obj_map_bnode,
                           predicate=NS_MGR.rml.key.rdflib),
            json_key)
        # BIBCAT Extensions
        pred_obj_map.delimiters = []
        if pred_obj_map.json_query:
            self.use_json_qry = True
        for obj in self.rml.objects(subject=obj_map_bnode,
                                    predicate=NS_MGR.kds.delimiter.rdflib):
            pred_obj_map.delimiters.append(obj)
        pred_obj_maps.append(pred_obj_map)
    return pred_obj_maps
1,153,947
Generates a RDF entity based on triple map Args: triple_map(SimpleNamespace): Triple Map
def __generate_reference__(self, triple_map, **kwargs):
    raw_value = self.source.get(str(triple_map.reference))
    if raw_value is None or len(raw_value) < 1:
        return
    if hasattr(triple_map, "datatype"):
        if triple_map.datatype == NS_MGR.xsd.anyURI.rdflib:
            output = rdflib.URIRef(raw_value)
        else:
            output = rdflib.Literal(raw_value,
                                    datatype=triple_map.datatype)
    else:
        output = rdflib.Literal(raw_value)
    return output
1,153,957
Method executes mapping between CSV source and output RDF args: triple_map(SimpleNamespace): Triple Map
def execute(self, triple_map, output, **kwargs):
    subject = self.generate_term(term_map=triple_map.subjectMap, **kwargs)
    start_size = len(output)
    all_subjects = []
    for pred_obj_map in triple_map.predicateObjectMap:
        predicate = pred_obj_map.predicate
        if pred_obj_map.template is not None:
            object_ = self.generate_term(term_map=pred_obj_map, **kwargs)
            # NOTE: the original tested str(object) (the builtin);
            # object_ is clearly what was meant.
            if len(str(object_)) > 0:
                output.add((subject, predicate, object_))
        if pred_obj_map.parentTriplesMap is not None:
            self.__handle_parents__(
                parent_map=pred_obj_map.parentTriplesMap,
                subject=subject,
                predicate=predicate,
                **kwargs)
        if pred_obj_map.reference is not None:
            object_ = self.generate_term(term_map=pred_obj_map, **kwargs)
            if object_ and len(str(object_)) > 0:
                output.add((subject, predicate, object_))
        if pred_obj_map.constant is not None:
            output.add((subject, predicate, pred_obj_map.constant))
    finish_size = len(output)
    if finish_size > start_size:
        output.add((subject,
                    NS_MGR.rdf.type.rdflib,
                    triple_map.subjectMap.class_))
        all_subjects.append(subject)
    return all_subjects
1,153,958
Method takes a row and, depending on whether it is a dict or a list, runs the RML rules. Args: ----- row(Dict, List): Row from CSV Reader
def run(self, row, **kwargs):
    self.source = row
    kwargs['output'] = self.__graph__()
    super(CSVRowProcessor, self).run(**kwargs)
    return kwargs['output']
1,153,959
Method executes mapping between JSON source and output RDF Args: ----- triple_map: SimpleNamespace
def execute(self, triple_map, output, **kwargs):
    subjects = []
    logical_src_iterator = str(triple_map.logicalSource.iterator)
    json_object = kwargs.get('obj', self.source)
    # Removes '.' as a generic iterator, replace with '@'
    if logical_src_iterator == ".":
        results = [None, ]
    else:
        json_path_exp = jsonpath_ng.parse(logical_src_iterator)
        results = [r.value for r in json_path_exp.find(json_object)][0]
    for row in results:
        subject = self.generate_term(term_map=triple_map.subjectMap,
                                     **kwargs)
        for pred_obj_map in triple_map.predicateObjectMap:
            predicate = pred_obj_map.predicate
            if pred_obj_map.template is not None:
                output.add((
                    subject,
                    predicate,
                    self.generate_term(term_map=pred_obj_map, **kwargs)))
            if pred_obj_map.parentTriplesMap is not None:
                self.__handle_parents__(
                    output,
                    parent_map=pred_obj_map.parentTriplesMap,
                    subject=subject,
                    predicate=predicate,
                    obj=row,
                    **kwargs)
            if pred_obj_map.reference is not None:
                ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference))
                found_objects = [r.value for r in ref_exp.find(row)]
                for obj in found_objects:
                    if rdflib.term._is_valid_uri(obj):
                        rdf_obj = rdflib.URIRef(str(obj))
                    else:
                        rdf_obj = rdflib.Literal(str(obj))
                    output.add((subject, predicate, rdf_obj))
            if pred_obj_map.constant is not None:
                output.add((subject, predicate, pred_obj_map.constant))
        subjects.append(subject)
    return subjects
1,153,963
Method takes a JSON source and any keywords and transforms from JSON to Lean BIBFRAME 2.0 triples Args: ---- source: str, dict
def run(self, source, **kwargs):
    kwargs['output'] = self.__graph__()
    if isinstance(source, str):
        import json
        source = json.loads(source)
    self.source = source
    super(JSONProcessor, self).run(**kwargs)
    self.output = kwargs['output']
    # NOTE: the original returned the undefined name `output`.
    return self.output
1,153,964
Internal method takes a triple_map and returns the result of applying to XPath to the current DOM context Args: ----- triple_map: SimpleNamespace element: etree.Element
def __generate_reference__(self, triple_map, **kwargs):
    element = kwargs.get("element")
    found_elements = element.xpath(
        triple_map.reference,
        namespaces=self.xml_ns)
    for elem in found_elements:
        raw_text = elem.text.strip()
        #! Quick and dirty test for valid URI
        if not raw_text.startswith("http"):
            continue
        return rdflib.URIRef(raw_text)
1,153,966
Method executes mapping between the XML source and output RDF Args: ----- triple_map: SimpleNamespace, Triple Map
def execute(self, triple_map, output, **kwargs):
    subjects = []
    found_elements = self.source.xpath(
        str(triple_map.logicalSource.iterator),
        namespaces=self.xml_ns)
    for element in found_elements:
        subject = self.generate_term(term_map=triple_map.subjectMap,
                                     element=element,
                                     **kwargs)
        start = len(output)
        for row in triple_map.predicateObjectMap:
            predicate = row.predicate
            if row.template is not None:
                obj_ = self.generate_term(term_map=row, **kwargs)
                output.add((subject, predicate, obj_))
            if row.parentTriplesMap is not None:
                self.__handle_parents__(
                    output,
                    parent_map=row.parentTriplesMap,
                    subject=subject,
                    predicate=predicate,
                    **kwargs)
            new_subjects = self.__reference_handler__(
                output,
                predicate_obj_map=row,
                element=element,
                subject=subject)
            subjects.extend(new_subjects)
            if row.constant is not None:
                output.add((subject, predicate, row.constant))
        if start < len(output):
            if triple_map.subjectMap.class_ is not None:
                output.add((subject,
                            NS_MGR.rdf.type.rdflib,
                            triple_map.subjectMap.class_))
            subjects.append(subject)
    return subjects
1,153,968
Method takes either an etree.ElementTree or raw XML text as the first argument. Args: xml (etree.ElementTree or text)
def run(self, xml, **kwargs):
    kwargs['output'] = self.__graph__()
    if isinstance(xml, str):
        try:
            self.source = etree.XML(xml)
        except ValueError:
            try:
                self.source = etree.XML(xml.encode())
            except Exception:
                raise ValueError("Cannot run error {}".format(
                    sys.exc_info()[0]))
    else:
        self.source = xml
    super(XMLProcessor, self).run(**kwargs)
    self.output = kwargs['output']
    return kwargs['output']
1,153,969
Internal method queries triplestore or remote sparql endpoint and returns the bindings Args: ---- sparql: String of SPARQL query output_format: String of type of output format
def __get_bindings__(self, sparql, output_format):
    return self.ext_conn.query(sparql,
                               rtn_format=output_format,
                               debug=False)
1,153,971
Method iterates through triple map's predicate object maps and processes query. Args: triple_map(SimpleNamespace): Triple Map
def execute(self, triple_map, output, **kwargs):
    sparql = PREFIX + triple_map.logicalSource.query.format(**kwargs)
    bindings = self.__get_bindings__(sparql)
    iterator = str(triple_map.logicalSource.iterator)
    for binding in bindings:
        entity_dict = binding.get(iterator)
        if isinstance(entity_dict, rdflib.term.Node):
            entity = entity_dict
        elif isinstance(entity_dict, dict):
            raw_value = entity_dict.get('value')
            if entity_dict.get('type').startswith('bnode'):
                entity = rdflib.BNode(raw_value)
            else:
                entity = rdflib.URIRef(raw_value)
        if triple_map.subjectMap.class_ is not None:
            output.add(
                (entity, rdflib.RDF.type, triple_map.subjectMap.class_))
        sparql_query = self.__construct_compound_query__(
            triple_map).format(**kwargs)
        properties = self.__get_bindings__(sparql_query)
        for pred_obj_map in triple_map.predicateObjectMap:
            predicate = pred_obj_map.predicate
            if pred_obj_map.constant is not None:
                output.add((entity, predicate, pred_obj_map.constant))
                continue
            if "#" in str(predicate):
                key = str(predicate).split("#")[-1]
            else:
                key = str(predicate).split("/")[-1]
            for property_ in properties:
                if key in property_.keys():
                    info = {"about": property_.get(key)}
                    object_ = __get_object__(info)
                    output.add((entity, predicate, object_))
1,153,978
Function to add a workshift profile for every User that is created. Parameters: instance is an instance of UserProfile that was just saved.
def create_workshift_profile(sender, instance, created, **kwargs):
    if instance.user.username == ANONYMOUS_USERNAME or \
       instance.status != UserProfile.RESIDENT:
        return
    try:
        semester = Semester.objects.get(current=True)
    except (Semester.DoesNotExist, Semester.MultipleObjectsReturned):
        pass
    else:
        profile, created = WorkshiftProfile.objects.get_or_create(
            user=instance.user,
            semester=semester,
        )
        if created:
            utils.make_workshift_pool_hours(
                semester=semester,
                profiles=[profile],
            )
1,154,240
Adds mutually exclusive switch arguments. Args: arguments: a dictionary that maps switch name to helper text. Use sets to skip help texts.
def add_mutex_switch(parser, dest, arguments=set(), default=None, single_arg=False, required=False): if default is not None: assert default in arguments if isinstance(arguments, set): arguments = {k: None for k in arguments} if not single_arg: mg = parser.add_mutually_exclusive_group(required=required) for name, help_text in arguments.items(): kwargs = { "action": "store_const", "dest": dest, "const": name, "help": help_text } if default == name: kwargs["default"] = name mg.add_argument("--{}".format(name), **kwargs) return mg else: kwargs = { "dest": dest, "type": str, "default": default, "help": "\n".join("{}: {}".format(k, v) for k, v in arguments.items()), "choices": list(arguments.keys()) } return parser.add_argument("--{}".format(dest), **kwargs)
1,154,309
Prompts the user to save an SVG document to disk. Parameters: ----------- string : basestring A Python string containing a SVG document. parent : QWidget, optional The parent to use for the file dialog. Returns: -------- The name of the file to which the document was saved, or None if the save was cancelled.
def save_svg(string, parent=None):
    if isinstance(string, unicode):
        string = string.encode('utf-8')
    dialog = QtGui.QFileDialog(parent, 'Save SVG Document')
    dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
    dialog.setDefaultSuffix('svg')
    dialog.setNameFilter('SVG document (*.svg)')
    if dialog.exec_():
        filename = dialog.selectedFiles()[0]
        f = open(filename, 'w')
        try:
            f.write(string)
        finally:
            f.close()
        return filename
    return None
1,154,347
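A minimal sketch of calling save_svg, assuming the same QtGui bindings the function itself uses and a running QApplication:

app = QtGui.QApplication([])
svg = '<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10"/>'
filename = save_svg(svg)  # opens the dialog; returns None on cancel
print(filename)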
Copy an SVG document to the clipboard.

    Parameters:
    -----------
    string : basestring
        A Python string containing an SVG document.
def svg_to_clipboard(string):
    if isinstance(string, unicode):
        string = string.encode('utf-8')

    mime_data = QtCore.QMimeData()
    mime_data.setData('image/svg+xml', string)
    QtGui.QApplication.clipboard().setMimeData(mime_data)
1,154,348
Convert an SVG document to a QImage.

    Parameters:
    -----------
    string : basestring
        A Python string containing an SVG document.
    size : QSize, optional
        The size of the image that is produced. If not specified, the SVG
        document's default size is used.

    Raises:
    -------
    ValueError
        If an invalid SVG string is provided.

    Returns:
    --------
    A QImage of format QImage.Format_ARGB32.
def svg_to_image(string, size=None):
    if isinstance(string, unicode):
        string = string.encode('utf-8')

    renderer = QtSvg.QSvgRenderer(QtCore.QByteArray(string))
    if not renderer.isValid():
        raise ValueError('Invalid SVG data.')

    if size is None:
        size = renderer.defaultSize()
    image = QtGui.QImage(size, QtGui.QImage.Format_ARGB32)
    painter = QtGui.QPainter(image)
    renderer.render(painter)
    painter.end()  # release the paint device before the image is returned
    return image
1,154,349
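A sketch rasterizing a tiny SVG and writing it to disk; again a QApplication must exist before QImage/QPainter are used.

app = QtGui.QApplication([])
svg = ('<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16">'
       '<rect width="16" height="16" fill="red"/></svg>')
image = svg_to_image(svg)
image.save('red_square.png')  # QImage.save infers the format from the suffix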
Constructor.

        Args:
            name: string. An invalid filename will be changed to a valid
                filename when writing files.
            argnum: int, default=2. Number of columns.
            **kwargs: string. The keyword is the variable name; the argument
                is either 'f' (float) or 'l' (list).
def __init__(self, name, ext=None, argnum=2, filetype="pickle",
             location_dat=None, location_internal=None, **kwargs):
        self.name = str(name)
        keepcharacters = (" ", ".", "_")
        self.valid_filename = "".join(
            c for c in self.name if c.isalnum() or c in keepcharacters
        ).rstrip()
        self.argnum = int(argnum)
        self.kwargs = kwargs
        with open(os.environ[ENV_VAR_SETTINGS], 'rb') as settings_file:
            settings = json.load(settings_file)
        root_dir = os.environ[ENV_VAR_ROOT_DIR]
        purpose = settings.get("PURPOSE", {})
        # membership test instead of the original `filetype is "pickle" or
        # "hickle"`, which was always truthy
        self.filetype = filetype if filetype in ("pickle", "hickle") \
            else "pickle"
        self.location_dat = location_dat
        if self.location_dat is None:
            self.location_dat = FILEPATHSTR.format(
                root_dir=root_dir, os_sep=os.sep, os_extsep=os.extsep,
                name=self.valid_filename,
                folder=purpose.get("data", {}).get("folder", "data"),
                ext=ext if ext is not None else purpose.get(
                    "data", {}).get("extension", "dat"),
            )
        self.location_internal = location_internal
        if self.location_internal is None:
            self.location_internal = FILEPATHSTR.format(
                root_dir=root_dir, os_sep=os.sep, os_extsep=os.extsep,
                name=self.valid_filename,
                folder=purpose.get("pickle", {}).get("folder", "pickle"),
                ext=purpose.get("pickle", {}).get("extension", "pickle")
            )
1,154,361
Load the user config.

    Args:
        vcs (easyci.vcs.base.Vcs) - the vcs object for the current project

    Returns:
        dict - the config

    Raises:
        ConfigFormatError
        ConfigNotFoundError
def load_user_config(vcs):
    config_path = os.path.join(vcs.path, 'eci.yaml')
    if not os.path.exists(config_path):
        raise ConfigNotFoundError
    with open(config_path, 'r') as f:
        try:
            config = yaml.safe_load(f)
        except yaml.YAMLError:
            raise ConfigFormatError
    if not isinstance(config, dict):
        raise ConfigFormatError
    for k, v in _default_config.iteritems():
        config.setdefault(k, v)
    for k, v in _config_types.iteritems():
        if not isinstance(config[k], v):
            raise ConfigFormatError
    return config
1,154,500
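A hedged sketch of the call site; `GitVcs` stands in for whatever easyci.vcs.base.Vcs implementation the project provides and is purely hypothetical here.

vcs = GitVcs()  # hypothetical Vcs implementation rooted at the repo
try:
    config = load_user_config(vcs)
except ConfigNotFoundError:
    config = dict(_default_config)  # fall back to the defaults
except ConfigFormatError:
    raise SystemExit("eci.yaml is malformed")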
Pick information from a :class:`.MARCXMLRecord` object and use it to build a :class:`.SemanticInfo` structure.

    Args:
        xml (str/MARCXMLRecord): MARC XML which will be converted to
            SemanticInfo. In case of str, a ``<record>`` tag is required.

    Returns:
        structure: :class:`.SemanticInfo`.
def from_xml(xml):
    hasAcquisitionFields = False
    acquisitionFields = []
    ISBNAgencyFields = []
    descriptiveCatFields = []
    descriptiveCatReviewFields = []
    subjectCatFields = []
    subjectCatReviewFields = []
    isClosed = False
    summaryRecordSysNumber = ""
    parsedSummaryRecordSysNumber = ""
    isSummaryRecord = False
    contentOfFMT = ""

    parsed = xml
    if not isinstance(xml, MARCXMLRecord):
        parsed = MARCXMLRecord(str(xml))

    # handle FMT record
    if "FMT" in parsed.controlfields:
        contentOfFMT = parsed["FMT"]
        if contentOfFMT == "SE":
            isSummaryRecord = True

    if "HLD" in parsed.datafields or "HLD" in parsed.controlfields:
        hasAcquisitionFields = True

    if "STZ" in parsed.datafields:
        acquisitionFields.extend(parsed["STZa"])
        acquisitionFields.extend(parsed["STZb"])

    def sign_and_author(sign):
        return [sign.replace(" ", "")] + sign.other_subfields.get("b", [])

    # look for catalogization fields
    for orig_sign in parsed["ISTa"]:
        sign = orig_sign.replace(" ", "")  # remove spaces
        if sign.startswith("jp2"):
            descriptiveCatFields.extend(sign_and_author(orig_sign))
        elif sign.startswith("jr2"):
            descriptiveCatReviewFields.extend(sign_and_author(orig_sign))
        elif sign.startswith("vp"):
            subjectCatFields.extend(sign_and_author(orig_sign))
        elif sign.startswith("vr"):
            subjectCatReviewFields.extend(sign_and_author(orig_sign))
        elif sign.startswith("ii2"):
            ISBNAgencyFields.extend(sign_and_author(orig_sign))

    # look whether the record was 'closed' by catalogizators
    for status in parsed["BASa"]:
        if status == "90":
            isClosed = True

    # if multiple PJM statuses are present, join them together
    status = "\n".join([x for x in parsed["PJMa"]])

    # detect link to 'new' record, if the old one was 'closed'
    if status.strip():
        summaryRecordSysNumber = status
        parsedSummaryRecordSysNumber = _parse_summaryRecordSysNumber(
            summaryRecordSysNumber
        )

    return SemanticInfo(
        hasAcquisitionFields=hasAcquisitionFields,
        acquisitionFields=acquisitionFields,
        ISBNAgencyFields=ISBNAgencyFields,
        descriptiveCatFields=descriptiveCatFields,
        descriptiveCatReviewFields=descriptiveCatReviewFields,
        subjectCatFields=subjectCatFields,
        subjectCatReviewFields=subjectCatReviewFields,
        isClosed=isClosed,
        isSummaryRecord=isSummaryRecord,
        contentOfFMT=contentOfFMT,
        parsedSummaryRecordSysNumber=parsedSummaryRecordSysNumber,
        summaryRecordSysNumber=summaryRecordSysNumber,
    )
1,154,780
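Usage sketch for from_xml, assuming a MARC XML file with a <record> element on disk; the file name is illustrative.

with open("marc_record.xml") as f:
    info = from_xml(f.read())

print(info.isClosed)
print(info.contentOfFMT)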
Parse a table from inputlist.

    Args:
        inputlist: list. List to parse.
        outputfile: file. .tex file to write.
        fmt: dictionary. key: integer column index starting with 0;
            values: format string, e.g. "{:g}".
        **kwargs:
            nonestring: string. String used when the object type is None.

    Returns:
        None
def make_tex_table(inputlist, outputfile, close=False, fmt=None, **kwargs):
    output_str = ""
    if fmt is None:
        fmt = {}
    for row in inputlist:
        for key, val in enumerate(row):
            if val is None:
                output_str += r'\text{{{}}}'.format(
                    str(kwargs.get("nonestring", "None"))
                )
            else:
                # get default
                if np.isscalar(val):
                    temp_str_fmt = "$\\num{{" + fmt.get(key, "{:g}") + "}}$"
                else:
                    temp_str_fmt = fmt.get(key, "{}")
                output_str += temp_str_fmt.format(val).replace("+", "")
            # column separator for every cell, including None cells (the
            # flattened original appended "&" only in the non-None branch)
            output_str += "&"
        output_str = output_str[:-1]  # drop the trailing "&"
        output_str += "\\\\\n"
    outputfile.write(output_str)
    if close:
        outputfile.close()
1,155,025
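A short example writing a two-row fragment; column 1 is formatted with two decimals, and None cells render via nonestring.

rows = [
    [1.0, 2.5, None],
    [3.0, 4.75, 5.5],
]
with open("table.tex", "w") as out:
    make_tex_table(rows, out, fmt={1: "{:.2f}"}, nonestring="--")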
Executes a command and returns the result.

    Args:
        cmd: command to execute
        paths: paths to search the executable in
        cwd: working directory (defaults to ".")
        mute: if true, output will not be printed
        filters: a list of partial strings to filter out from the output
            (stdout or stderr)
        failure_ok: if False (default), a return code other than 0 will exit
            the application
        timeout: sub-process timeout

    Returns:
        command output
def run(cmd: str,
        *paths: str,
        cwd: str = '.',
        mute: bool = False,
        filters: typing.Optional[typing.Union[typing.Iterable[str], str]] = None,
        failure_ok: bool = False,
        timeout: float = _DEFAULT_PROCESS_TIMEOUT,
        ) -> typing.Tuple[str, int]:
    filters = _sanitize_filters(filters)
    exe_path, args_list = _parse_cmd(cmd, *paths)
    context = RunContext(  # type: ignore
        exe_path=exe_path,
        capture=sarge.Capture(),
        failure_ok=failure_ok,
        mute=mute,
        args_list=args_list,
        paths=paths,
        cwd=cwd,
        timeout=timeout,
        filters=filters,
    )
    if mute:
        context.result_buffer += f'{context.cmd_as_string}'
    else:
        _LOGGER_PROCESS.info('%s: running', context.cmd_as_string)
    context.start_process()
    monitor_running_process(context)
    check_error(context)
    return context.process_output_as_str, context.return_code
1,155,317
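Usage sketch: the helper returns the captured output together with the return code.

output, returncode = run('python --version', mute=True, failure_ok=True)
print(returncode, output)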
Initialize the object.

        Args:
            server: Address of the ZEO server.
            port: Port of the ZEO server.
            project_key (str, default None): See :attr:`project_key`. If not
                set, the root of the database is used (this may cause
                performance issues).
            run_asyncore_thread (bool, default True): Run an external
                asyncore thread, which handles connections to the database?
                Default True.
def __init__(self, server, port, project_key=None,
             run_asyncore_thread=True):
        self.server = server
        self.port = port

        super(ZEOWrapper, self).__init__(
            project_key=project_key,
            run_asyncore_thread=run_asyncore_thread,
        )
1,155,429
Unpacks an Any message and returns the unpacked message.

    This internal method is different from the public Any Unpack method,
    which takes the target message as an argument. _InternalUnpackAny does
    not have the target message type and needs to find the message type in
    the descriptor pool.

    Args:
        msg: An Any message to be unpacked.

    Returns:
        The unpacked message.
def _InternalUnpackAny(msg):
    type_url = msg.type_url
    db = symbol_database.Default()

    if not type_url:
        return None

    # TODO(haberman): For now we just strip the hostname.  Better logic will
    # be required.
    type_name = type_url.split("/")[-1]
    descriptor = db.pool.FindMessageTypeByName(type_name)

    if descriptor is None:
        return None

    message_class = db.GetPrototype(descriptor)
    message = message_class()

    message.ParseFromString(msg.value)
    return message
1,155,642
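A quick round-trip sketch with a well-known type; importing timestamp_pb2 registers Timestamp in the default descriptor pool, which is what the lookup above relies on.

from google.protobuf import any_pb2, timestamp_pb2

ts = timestamp_pb2.Timestamp(seconds=1)
any_msg = any_pb2.Any()
any_msg.Pack(ts)  # sets type_url to .../google.protobuf.Timestamp

unpacked = _InternalUnpackAny(any_msg)
print(type(unpacked).__name__)  # Timestamp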
Creates a double-linked DOM from `data`.

    Args:
        data (str/HTMLElement): Either a string or a HTML element.

    Returns:
        obj: HTMLElement containing the double-linked DOM.
def _create_dom(data):
    if not isinstance(data, dhtmlparser.HTMLElement):
        data = dhtmlparser.parseString(
            utils.handle_encodnig(data)
        )

    dhtmlparser.makeDoubleLinked(data)

    return data
1,155,716
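Usage sketch: a plain string is parsed and double-linked, after which parent pointers are available on every node.

dom = _create_dom('<html><body><h1>Title</h1></body></html>')
h1 = dom.find("h1")[0]
print(h1.parent.getTagName())  # body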
Find the location of elements matching the patterns specified in `matches`.

    Args:
        dom (obj): HTMLElement DOM tree.
        matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``.

    Returns:
        dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}``
def _match_elements(dom, matches):
    out = {}
    for key, content in matches.items():
        pattern = content["data"].strip()
        if "\n" in pattern:
            pattern = pattern.split()
            transformer = lambda x: x.strip().split()
        else:
            transformer = lambda x: x.strip()

        matching_elements = _locate_element(
            dom,
            pattern,
            transformer=transformer
        )

        not_found_msg = content.get("notfoundmsg", "").replace("$name", key)
        if not not_found_msg.strip():
            not_found_msg = "Can't locate variable '%s' with content '%s'!" % (
                key,
                pattern,
            )
        content["notfoundmsg"] = not_found_msg

        # in case of multiple elements, keep only elements with a proper
        # tagname
        tagname = content.get("tagname", "").strip().lower()
        if tagname:
            matching_elements = filter(
                lambda x: x.getTagName().strip().lower() == tagname,
                matching_elements
            )

        if not matching_elements:
            raise UserWarning(not_found_msg)

        if len(matching_elements) > 1:
            raise UserWarning(
                "Ambiguous content '%s'!" % content +
                "Content was found in multiple elements!"
            )

        out[key] = matching_elements[0]

    return out
1,155,718
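A hedged sketch of the expected input/output shape, assuming _locate_element matches elements by their content:

dom = _create_dom('<html><body><p id="price">142</p></body></html>')
matches = {
    "price": {"data": "142", "tagname": "p"},
}
found = _match_elements(dom, matches)
print(found["price"].getTagName())  # p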
Collect all possible paths which lead to `element`.

    The function returns the standard path from the root element to this
    element, a reverse path which uses negative indexes, and also some
    pattern matches, like "this is the element whose neighbour has id 7"
    and so on.

    Args:
        element (obj): HTMLElement instance.

    Returns:
        list: List of :class:`.PathCall` and :class:`.Chained` objects.
def _collect_paths(element):
    output = []

    # look for element by parameters - sometimes the ID is unique
    path = vectors.el_to_path_vector(element)
    root = path[0]
    params = element.params if element.params else None
    match = root.find(element.getTagName(), params)

    if len(match) == 1:
        output.append(
            PathCall("find", 0, [element.getTagName(), params])
        )

    # look for element by neighbours
    output.extend(path_patterns.neighbours_pattern(element))

    # look for elements by patterns - element, which parent has tagname, and
    # which parent has tagname ..
    output.extend(path_patterns.predecesors_pattern(element, root))

    index_backtrack = []
    last_index_backtrack = []
    params_backtrack = []
    last_params_backtrack = []

    # look for element by paths from root to element
    for el in reversed(path):
        # skip root elements
        if not el.parent:
            continue

        tag_name = el.getTagName()
        match = el.parent.wfind(tag_name).childs
        index = match.index(el)

        index_backtrack.append(
            PathCall("wfind", index, [tag_name])
        )
        last_index_backtrack.append(
            PathCall("wfind", index - len(match), [tag_name])
        )

        # if element has some parameters, use them for lookup
        if el.params:
            match = el.parent.wfind(tag_name, el.params).childs
            index = match.index(el)

            params_backtrack.append(
                PathCall("wfind", index, [tag_name, el.params])
            )
            last_params_backtrack.append(
                PathCall("wfind", index - len(match), [tag_name, el.params])
            )
        else:
            params_backtrack.append(
                PathCall("wfind", index, [tag_name])
            )
            last_params_backtrack.append(
                PathCall("wfind", index - len(match), [tag_name])
            )

    output.extend([
        Chained(reversed(params_backtrack)),
        Chained(reversed(last_params_backtrack)),
        Chained(reversed(index_backtrack)),
        Chained(reversed(last_index_backtrack)),
    ])

    return output
1,155,719
Process `examples`, selecting only the paths that work for every example. Select the best paths with the highest priority.

    Args:
        examples (dict): Output from :func:`.read_config`.

    Returns:
        list: List of :class:`.PathCall` and :class:`.Chained` objects.
def select_best_paths(examples):
    possible_paths = {}  # {varname: [paths]}

    # collect list of all possible paths to all existing variables
    for example in examples:
        dom = _create_dom(example["html"])
        matching_elements = _match_elements(dom, example["vars"])

        for key, match in matching_elements.items():
            if key not in possible_paths:  # TODO: merge paths together?
                possible_paths[key] = _collect_paths(match)

    # leave only paths that work in all examples where they are required
    for example in examples:
        dom = _create_dom(example["html"])
        matching_elements = _match_elements(dom, example["vars"])

        for key, paths in possible_paths.items():
            if key not in matching_elements:
                continue

            possible_paths[key] = filter(
                lambda path: _is_working_path(
                    dom,
                    path,
                    matching_elements[key]
                ),
                paths
            )

    priorities = [
        "find",
        "left_neighbour_tag",
        "right_neighbour_tag",
        "wfind",
        "match",
        "Chained"
    ]
    priorities = dict(map(lambda x: (x[1], x[0]), enumerate(priorities)))

    # sort all paths by priority table
    for key in possible_paths.keys():
        possible_paths[key] = list(sorted(
            possible_paths[key],
            key=lambda x: priorities.get(x.call_type, 100)
        ))

    return possible_paths
1,155,721
Make sure that `pub` is an instance of `obj_type`.

    Args:
        pub (obj): Instance which will be checked.
        name (str): Name of the instance. Used in the exception. Default
            `pub`.
        obj_type (class): Class of which `pub` should be an instance.
            Default :class:`.DBPublication`.

    Raises:
        InvalidType: When `pub` is not an instance of `obj_type`.
def _assert_obj_type(pub, name="pub", obj_type=DBPublication):
    if not isinstance(pub, obj_type):
        raise InvalidType(
            "`%s` has to be an instance of %s, not %s!" % (
                name,
                obj_type.__name__,
                pub.__class__.__name__
            )
        )
1,155,743
getZernike

        Retrieve a map representing the index-th Zernike polynomial.

        Args:
            index (int): The index of the Zernike map to be generated,
                following Noll 1976 ordering.

        Returns:
            np.array: A map representing the index-th Zernike polynomial.
def getZernike(self, index):
        if index not in list(self._dictCache.keys()):
            self._dictCache[index] = self._polar(index,
                                                 self._rhoMap,
                                                 self._thetaMap)

        return self._dictCache[index]
1,155,823