def validate(self):
    """Default validation for updated properties: MAY be overridden in children"""
    validate_properties(self._data_map, self._metadata_props)
    for prop in self._data_map:
        validate_any(prop, getattr(self, prop), self._data_structures.get(prop))
    return self
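# Hedged sketch of the override pattern the docstring allows: a child class
# can run the default validation and then add its own checks. Class and
# attribute names below are hypothetical, not from this codebase.
# class Child(Base):
#     def validate(self):
#         super().validate()          # default property validation first
#         if self.count < 0:          # then child-specific checks
#             raise ValueError('count must be non-negative')
#         return self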
def _search_regex(ops: dict, regex_pat: str):
    """
    Search order:
      * specified regexps
      * operators sorted from longer to shorter
    """
    custom_regexps = list(filter(None, [dic['regex'] for op, dic in ops.items() if 'regex' in dic]))
    op_names = [op for op, dic in ops.items() if 'regex' not in dic]
    regex = [regex_pat.format(_ops_regex(op_names))] if len(op_names) > 0 else []
    return re.compile('|'.join(custom_regexps + regex))
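# A minimal standalone sketch of the composition above. `_ops_regex` is not
# shown in this excerpt, so `_ops_regex_sketch` below is a hypothetical
# stand-in that escapes operator names and sorts them longest-first
# (so '**' wins over '*'):
import re

def _ops_regex_sketch(op_names):
    return '|'.join(re.escape(op) for op in sorted(op_names, key=len, reverse=True))

custom_regexps = [r'(->)']                              # ops that brought their own regex
auto = ['({})'.format(_ops_regex_sketch(['**', '*']))]  # the rest, auto-generated
pattern = re.compile('|'.join(custom_regexps + auto))
print(pattern.search('a ** b').group(0))                # '**', not '*'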
def spec(self, postf_un_ops: str) -> list:
    """Return prefix unary operators list"""
    spec = [(l + op, {'pat': self.pat(pat), 'postf': self.postf(r, postf_un_ops), 'regex': None})
            for op, pat in self.styles.items()
            for l, r in self.brackets]
    spec[0][1]['regex'] = self.regex_pat.format(
        _ops_regex(l for l, r in self.brackets),
        _ops_regex(self.styles.keys())
    )
    return spec
def spec(self) -> list:
    """Return prefix unary operators list. Sets only one regex for all items in the dict."""
    spec = [item
            for op, pat in self.ops.items()
            for item in [('{' + op, {'pat': pat, 'postf': self.postf, 'regex': None}),
                         ('˱' + op, {'pat': pat, 'postf': self.postf, 'regex': None})]]
    spec[0][1]['regex'] = self.regex_pat.format(_ops_regex(self.ops.keys()))
    return spec
def fill(self, postf_un_ops: str):
    """
    Insert:
      * math styles
      * other styles
      * unary prefix operators without brackets
      * defaults
    """
    for op, dic in self.ops.items():
        if 'postf' not in dic:
            dic['postf'] = self.postf
    self.ops = OrderedDict(
        self.styles.spec(postf_un_ops) +
        self.other_styles.spec(postf_un_ops) +
        self.pref_un_greedy.spec() +
        list(self.ops.items())
    )
    for op, dic in self.ops.items():
        dic['postf'] = re.compile(dic['postf'])
    self.regex = _search_regex(self.ops, self.regex_pat)
def one_symbol_ops_str(self) -> str:
    """Regex-escaped string with all one-symbol operators"""
    return re.escape(''.join(key for key in self.ops.keys() if len(key) == 1))
def _su_scripts_regex(self):
    """
    :return: [compiled regex, function]
    """
    sups = re.escape(''.join(self.superscripts.keys()))
    subs = re.escape(''.join(self.subscripts.keys()))
    # language=PythonRegExp
    su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +
                r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)'
                ).format(su_=subs + sups, sub=subs, sup=sups)
    su_regex = re.compile(su_regex)

    def su_replace(m):
        esc, sub, root_sup, sup = m.groups()
        if esc is not None:
            return esc
        elif sub is not None:
            return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c]
                                   for c in sub]) + '}'
        elif root_sup is not None:
            return ''.join([self.superscripts[c] for c in root_sup])
        elif sup is not None:
            return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c]
                                   for c in sup]) + '}'
        else:
            raise TypeError("Regex bug: this should never be reached")

    return [su_regex, su_replace]
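# Hypothetical mini-demo of the same idea: map Unicode sub/superscript
# characters to LaTeX scripts. The real tables (self.subscripts /
# self.superscripts) are not in this excerpt, so a tiny stand-in is used.
import re

subscripts_demo = {'₁': '1', '₂': '2'}
sub_regex_demo = re.compile('([{}]+)'.format(re.escape(''.join(subscripts_demo))))
print(sub_regex_demo.sub(
    lambda m: '_{' + ''.join(subscripts_demo[c] for c in m.group(1)) + '}',
    'x₁₂'))  # -> x_{12}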
def _local_map(match, loc: str = 'lr') -> list:
    """
    :param match: regex match for the operator
    :param loc: str
        "l" or "r" or "lr" - turns on/off left/right local area calculation
    :return: list
        A local map of the same size as the string + 2 that counts { and }
        brackets; it can contain None or int >= 0.
        From the left of the operator match:
            in `b}a` if a:0 then }:0 and b:1
            in `b{a` if a:0 then {:0 and b:-1 (None)
        From the right of the operator match:
            in `a{b` if a:0 then {:0 and b:1
            in `a}b` if a:0 then }:0 and b:-1 (None)
        The map for the +1 (needed for r'$') and -1 (needed for r'^')
        characters is also stored: +1 -> +1, -1 -> +2
    """
    s = match.string
    map_ = [None] * (len(s) + 2)
    if loc == 'l' or loc == 'lr':
        balance = 0
        for i in reversed(range(0, match.start())):
            map_[i] = balance
            c, prev = s[i], (s[i - 1] if i > 0 else '')
            if (c == '}' or c == '˲') and prev != '\\':
                balance += 1
            elif (c == '{' or c == '˱') and prev != '\\':
                balance -= 1
            if balance < 0:
                break
        map_[-1] = balance
    if loc == 'r' or loc == 'lr':
        balance = 0
        for i in range(match.end(), len(s)):
            map_[i] = balance
            c, prev = s[i], s[i - 1]
            if (c == '{' or c == '˱') and prev != '\\':
                balance += 1
            elif (c == '}' or c == '˲') and prev != '\\':
                balance -= 1
            if balance < 0:
                break
        map_[len(s)] = balance
    return map_
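# Standalone illustration of the bracket-balance idea behind _local_map:
# scanning right from an operator, positions inside nested '{...}' groups get
# a positive count, and a closing bracket from the outer scope drives the
# balance negative (escape handling is omitted in this simplified sketch).
def right_balance_demo(s, start):
    balance, out = 0, {}
    for i in range(start, len(s)):
        out[i] = balance
        if s[i] == '{':
            balance += 1
        elif s[i] == '}':
            balance -= 1
        if balance < 0:
            break
    return out

print(right_balance_demo('a/{b}c}', 2))  # {2: 0, 3: 1, 4: 1, 5: 0, 6: 0}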
def _operators_replace(self, string: str) -> str:
    """
    Searches for the first unary or binary operator (via self.op_regex, which
    has only one group that contains the operator), then replaces it (or
    escapes it if brackets do not match). Everything until:
      * space ' '
      * begin/end of the string
      * bracket from outer scope (like '{a/b}': term1=a term2=b)
    is considered a term (contents of matching brackets '{}' are ignored).

    Attributes
    ----------
    string: str
        string to replace
    """
    # noinspection PyShadowingNames
    def replace(string: str, start: int, end: int, substring: str) -> str:
        return string[0:start] + substring + string[end:len(string)]

    # noinspection PyShadowingNames
    def sub_pat(pat: Callable[[list], str] or str, terms: list) -> str:
        if isinstance(pat, str):
            return pat.format(*terms)
        else:
            return pat(terms)

    count = 0

    def check():
        nonlocal count
        count += 1
        if count > self.max_while:
            raise RuntimeError('Presumably while loop is stuck')

    # noinspection PyShadowingNames
    def null_replace(match) -> str:
        regex_terms = [gr for gr in match.groups() if gr is not None]
        op = regex_terms[0]
        terms = regex_terms[1:]
        return sub_pat(self.null_ops.ops[op]['pat'], terms)

    string = self.null_ops.regex.sub(null_replace, string)
    for ops, loc in [(self.pref_un_ops, 'r'),
                     (self.postf_un_ops, 'l'),
                     (self.bin_centr_ops, 'lr')]:
        count = 0
        match = ops.regex.search(string)
        while match:
            check()
            regex_terms = [gr for gr in match.groups() if gr is not None]
            op = regex_terms[0]
            loc_map = self._local_map(match, loc)
            lmatch, rmatch = None, None
            if loc == 'l' or loc == 'lr':
                for m in ops.ops[op]['pref'].finditer(string):
                    if m.end() <= match.start() and loc_map[m.end() - 1] == 0:
                        lmatch = m
                if lmatch is None:
                    string = replace(string, match.start(), match.end(),
                                     match.group(0).replace(op, '\\' + op))
                    match = ops.regex.search(string)
                    continue
                else:
                    term1 = string[lmatch.end():match.start()]
            if loc == 'r' or loc == 'lr':
                for m in ops.ops[op]['postf'].finditer(string):
                    if m.start() >= match.end() and loc_map[m.start()] == 0:
                        rmatch = m
                        break
                if rmatch is None:
                    string = replace(string, match.start(), match.end(),
                                     match.group(0).replace(op, '\\' + op))
                    match = ops.regex.search(string)
                    continue
                else:
                    term2 = string[match.end():rmatch.start()]
            if loc == 'l':
                # noinspection PyUnboundLocalVariable
                terms = list(lmatch.groups()) + [term1] + regex_terms[1:]
                start, end = lmatch.start(), match.end()
            elif loc == 'r':
                # noinspection PyUnboundLocalVariable
                terms = regex_terms[1:] + [term2] + list(rmatch.groups())
                start, end = match.start(), rmatch.end()
            elif loc == 'lr':
                terms = (list(lmatch.groups()) + [term1] + regex_terms[1:] +
                         [term2] + list(rmatch.groups()))
                start, end = lmatch.start(), rmatch.end()
            else:  # this should never happen
                terms = regex_terms[1:]
                start, end = match.start(), match.end()
            string = replace(string, start, end, sub_pat(ops.ops[op]['pat'], terms))
            match = ops.regex.search(string)
    return string
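# The nested sub_pat helper accepts either a format string or a callable;
# a standalone check of both branches (note the doubled braces that
# str.format needs for literal '{' and '}'):
def sub_pat_demo(pat, terms):
    return pat.format(*terms) if isinstance(pat, str) else pat(terms)

print(sub_pat_demo(r'\frac{{{0}}}{{{1}}}', ['a', 'b']))  # \frac{a}{b}
print(sub_pat_demo(lambda t: '^'.join(t), ['x', '2']))   # x^2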
def replace(self, src: str) -> str:
    """
    Extends LaTeX syntax via regex preprocessing

    :param src: str
        LaTeX string
    :return: str
        New LaTeX string
    """
    if not self.readied:
        self.ready()
    # Brackets + simple pre replacements:
    src = self._dict_replace(self.simple_pre, src)
    # Superscripts and subscripts + pre regexps:
    for regex, replace in self.regex_pre:
        src = regex.sub(replace, src)
    # Unary and binary operators:
    src = self._operators_replace(src)
    # Loop regexps:
    src_prev = src
    for i in range(self.max_iter):
        for regex, replace in self.loop_regexps:
            src = regex.sub(replace, src)
        if src_prev == src:
            break
        else:
            src_prev = src
    # Post regexps:
    for regex, replace in self.regex_post:
        src = regex.sub(replace, src)
    # Simple post replacements:
    src = self._dict_replace(self.simple_post, src)
    # Escape characters:
    src = self.escapes_regex.sub(r'\1', src)
    return src
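# The "Loop regexps" stage above reruns its substitutions until the string
# stops changing, bounded by max_iter. A minimal standalone sketch of that
# fixed-point loop with a toy rule:
import re

loop_regexps_demo = [(re.compile('aa'), 'a')]  # toy rule: collapse 'aa' runs
src_demo, src_prev = 'aaaaa b', 'aaaaa b'
for _ in range(10):                            # stands in for self.max_iter
    for regex, repl in loop_regexps_demo:
        src_demo = regex.sub(repl, src_demo)
    if src_prev == src_demo:
        break
    src_prev = src_demo
print(src_demo)                                # 'a b'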
def plot_gaps(plot, columns):
    """Plot % of gaps at each position"""
    from plot_window import window_plot_convolve as plot_window
    # plot_window([columns], len(columns) * .01, plot)
    plot_window([[100 - i for i in columns]], len(columns) * .01, plot)
def strip_msa_100(msa, threshold, plot=False):
    """Strip out columns of an MSA that represent gaps for X percent (threshold) of sequences"""
    msa = [seq for seq in parse_fasta(msa)]
    columns = [[0, 0] for pos in msa[0][1]]  # [[#bases, #gaps], [#bases, #gaps], ...]
    for seq in msa:
        for position, base in enumerate(seq[1]):
            if base == '-' or base == '.':
                columns[position][1] += 1
            else:
                columns[position][0] += 1
    # convert to percent gaps
    columns = [float(g) / float(g + b) * 100 for b, g in columns]
    for seq in msa:
        stripped = []
        for position, base in enumerate(seq[1]):
            if columns[position] < threshold:
                stripped.append(base)
        yield [seq[0], ''.join(stripped)]
    if plot is not False:
        plot_gaps(plot, columns)
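# Standalone check of the percent-gap arithmetic above: columns holds
# [#bases, #gaps] pairs that are converted to percent gaps per position.
columns_demo = [[3, 1], [0, 4], [4, 0]]                # toy counts, 3 positions
percents = [float(g) / float(g + b) * 100 for b, g in columns_demo]
print(percents)                                        # [25.0, 100.0, 0.0]
# with threshold = 50, only positions 0 and 2 would survive stripping:
print([i for i, p in enumerate(percents) if p < 50])   # [0, 2]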
def sample_group(sid, groups):
    """
    Iterate through all categories in an OrderedDict and return the category
    name if the SampleID is present in that category.

    :type sid: str
    :param sid: SampleID from the dataset.
    :type groups: OrderedDict
    :param groups: Dict returned by the phylotoast.util.gather_categories() function.
    :rtype: str
    :return: Category name used to classify `sid`.
    """
    for name in groups:
        if sid in groups[name].sids:
            return name
def combine_sets(*sets):
    """Combine multiple sets to create a single larger set."""
    combined = set()
    for s in sets:
        combined.update(s)
    return combined
def unique_otuids(groups):
    """
    Get unique OTUIDs of each category.

    :type groups: dict
    :param groups: {Category name: OTUIDs in category}
    :rtype: dict
    :return: Dict keyed on category name with unique OTUIDs as values.
    """
    uniques = {key: set() for key in groups}
    values = list(groups.values())  # dict views are not sliceable in Python 3
    for i, group in enumerate(groups):
        to_combine = values[:i] + values[i + 1:]
        combined = combine_sets(*to_combine)
        uniques[group] = groups[group].difference(combined)
    return uniques
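# Toy run of unique_otuids: each category keeps only the OTUIDs that appear
# in no other category (relies on combine_sets defined above).
groups_demo = {'A': {'otu1', 'otu2'}, 'B': {'otu2', 'otu3'}, 'C': {'otu4'}}
print(unique_otuids(groups_demo))
# -> {'A': {'otu1'}, 'B': {'otu3'}, 'C': {'otu4'}}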
def shared_otuids(groups):
    """
    Get shared OTUIDs between all unique combinations of groups.

    :type groups: dict
    :param groups: {Category name: OTUIDs in category}
    :rtype: dict
    :return: Dict keyed on group combination with their shared OTUIDs as values.
    """
    for g in sorted(groups):
        print("Number of OTUs in {0}: {1}".format(g, len(groups[g].results["otuids"])))
    number_of_categories = len(groups)
    shared = defaultdict()
    for i in range(2, number_of_categories + 1):
        for j in combinations(sorted(groups), i):
            combo_name = " & ".join(list(j))
            # initialize combo values with the first group's OTUIDs
            shared[combo_name] = groups[j[0]].results["otuids"].copy()
            # iterate through the remaining groups, narrowing the combo's
            # OTUIDs with set.intersection_update
            for grp in j[1:]:
                shared[combo_name].intersection_update(groups[grp].results["otuids"])
    return shared
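# Toy run of shared_otuids with a minimal stand-in for the group objects
# (the real ones carry a results["otuids"] entry; the defaultdict and
# combinations imports are assumed from the surrounding module):
class _Group:
    def __init__(self, otuids):
        self.results = {"otuids": set(otuids)}

shared_demo = {'A': _Group({'o1', 'o2'}), 'B': _Group({'o2', 'o3'}), 'C': _Group({'o2'})}
print(shared_otuids(shared_demo)['A & B & C'])  # {'o2'}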
def write_uniques(path, prefix, uniques):
    """
    Given a path, write out one file for each group name in the uniques
    dictionary, with the file name in the pattern PATH/prefix_group.txt.
    Each file contains the unique OTUIDs found when comparing that group to
    all the other groups in uniques.

    :type path: str
    :param path: Output files will be saved in this path.
    :type prefix: str
    :param prefix: Prefix name added in front of the output filename.
    :type uniques: dict
    :param uniques: Output from the unique_otuids() function.
    """
    for group in uniques:
        fp = osp.join(path, "{}_{}.txt".format(prefix, group))
        with open(fp, "w") as outf:
            outf.write("\n".join(uniques[group]))
def storeFASTA(fastaFNH):
    """
    Parse the records in a FASTA-format file by first reading the entire file
    into memory.

    :type fastaFNH: path to a FASTA file or an open file handle
    :param fastaFNH: The data source from which to parse the FASTA records.
        Expects the input to resolve to a collection that can be iterated
        through, such as an open file handle.
    :rtype: list
    :return: FASTA records containing entries for id, description, and data.
    """
    fasta = file_handle(fastaFNH).read()
    return [FASTARecord(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:]))
            for rec in (x.strip().split("\n") for x in fasta.split(">")[1:])]
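# Standalone check of the split-based trick used in storeFASTA: splitting the
# whole file on '>' yields one chunk per record, whose first line holds the
# id and description.
fasta_demo = ">seq1 first record\nACGT\nACG-\n>seq2 second\nTTTT\n"
chunks = [x.strip().split("\n") for x in fasta_demo.split(">")[1:]]
print([(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:])) for rec in chunks])
# -> [('seq1', 'first record', 'ACGTACG-'), ('seq2', 'second', 'TTTT')]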
def parseFASTA(fastaFNH):
    """
    Parse the records in a FASTA-format file, keeping the file open and
    reading through one line at a time.

    :type fastaFNH: path to a FASTA file or an open file handle
    :param fastaFNH: The data source from which to parse the FASTA records.
        Expects the input to resolve to a collection that can be iterated
        through, such as an open file handle.
    :rtype: list
    :return: FASTA records containing entries for id, description, and data.
    """
    recs = []
    seq = []
    seqID = ""
    descr = ""
    for line in file_handle(fastaFNH):
        line = line.strip()
        if not line or line[0] == ";":  # skip blank and comment lines
            continue
        if line[0] == ">":
            # conclude previous record
            if seq:
                recs.append(FASTARecord(seqID, descr, "".join(seq)))
                seq = []
            # start new record
            line = line[1:].split(None, 1)
            seqID, descr = line[0], line[1]
        else:
            seq.append(line)
    # catch the last seq in the file
    if seq:
        recs.append(FASTARecord(seqID, descr, "".join(seq)))
    return recs
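# In-memory check of the streaming parser: unlike storeFASTA it reads one
# line at a time and skips blank and ';' comment lines. FASTARecord and
# file_handle come from the surrounding module, so the call below is only
# a sketch:
# import io
# recs = parseFASTA(io.StringIO(";comment\n>seq1 first\nAC-GT\n"))
# recs[0] would then hold id 'seq1', description 'first', and data 'AC-GT'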
def parse_map_file(mapFNH):
    """
    Opens a QIIME mapping file and stores the contents in a dictionary keyed
    on SampleID (default) or a user-supplied one. The only required fields
    are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and
    Description (which must be the final field).

    :type mapFNH: str
    :param mapFNH: Either the full path to the map file or an open file handle.
    :rtype: tuple, dict
    :return: A tuple of the header line for the mapping file and a map
        associating each line of the mapping file with the appropriate sample
        ID (each value of the map also contains the sample ID). An OrderedDict
        is used so the returned map is guaranteed to have the same order as
        the input file.

    Example data:
    #SampleID	BarcodeSequence	LinkerPrimerSequence	State	Description
    11.V13	ACGCTCGACA	GTTTGATCCTGGCTCAG	Disease	Rat_Oral
    """
    m = OrderedDict()
    map_header = None
    with file_handle(mapFNH) as mapF:
        for line in mapF:
            if line.startswith("#SampleID"):
                map_header = line.strip().split("\t")
            if line.startswith("#") or not line.strip():
                continue
            line = line.strip().split("\t")
            m[line[0]] = line
    return map_header, m
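# Quick in-memory sketch of parse_map_file usage; it assumes file_handle
# passes open handles (here io.StringIO) through unchanged, which is not
# confirmed by this excerpt:
# import io
# header, samples = parse_map_file(io.StringIO(
#     "#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tState\tDescription\n"
#     "11.V13\tACGCTCGACA\tGTTTGATCCTGGCTCAG\tDisease\tRat_Oral\n"))
# header[0] -> '#SampleID'; samples["11.V13"][3] -> 'Disease'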