query
stringlengths
9
60
language
stringclasses
1 value
code
stringlengths
105
25.7k
url
stringlengths
91
217
fuzzy match ranking
python
def _exact_fuzzy_match(response, match, insensitive): ''' Return True if the response matches fuzzily exactly. Insensitivity is taken into account. ''' if insensitive: response = response.lower() match = match.lower() r_words = response.split() m_words = match.split() # match whole words first for word in r_words: if word in m_words: r_words.remove(word) m_words.remove(word) # no partial matches allowed # if all the items in the response were matched, # and all the items in the match were matched, # then this is an exact fuzzy match return len(r_words) == 0 and len(m_words) == 0
https://github.com/toejough/pimento/blob/cdb00a93976733aa5521f8504152cedeedfc711a/pimento/__init__.py#L276-L295
fuzzy match ranking
python
def _fuzzily_matches(response, candidate): '''return True if response fuzzily matches candidate''' r_words = response.split() c_words = candidate.split() # match whole words first for word in r_words: if word in c_words: r_words.remove(word) c_words.remove(word) # match partial words, fewest matches first match_pairs = [] for partial in sorted(r_words, key=lambda p: len(p), reverse=True): matches = [w for w in c_words if partial in w] match_pairs.append((partial, matches)) # if all items can be uniquly matched, the match is passed while len(match_pairs): min_pair = min(match_pairs, key=lambda x:len(x[1])) # this is the partial and matches with the shortest match list # if there are ever no matches for something, the match is failed if len(min_pair[1]) == 0: return False # choose the match with the fewest matches to remaining partials. # that way we leave more options for more partials, for the best # chance of a full match partials_left = [p[0] for p in match_pairs] min_option = min(min_pair[1], key=lambda x:len([p for p in partials_left if x in p])) # remove the current pair - we've matched it now match_pairs.remove(min_pair) # remove the matched option from all pairs' options so it won't be matched again for pair in match_pairs: pair_options = pair[1] if min_option in pair_options: pair_options.remove(min_option) # if all the items in the response were matched, this is match return True
https://github.com/toejough/pimento/blob/cdb00a93976733aa5521f8504152cedeedfc711a/pimento/__init__.py#L234-L268
fuzzy match ranking
python
def fuzzy_search_by_title(self, title, ignore_groups=None):
    """Find an entry by fuzzy match.

    This will check things such as:

        * case insensitive matching
        * typo checks
        * prefix matches

    If the ``ignore_groups`` argument is provided, then any matching
    entries in the ``ignore_groups`` list will not be returned. This
    argument can be used to filter out groups you are not interested in.

    Returns a list of matches (an empty list is returned if no matches are
    found).
    """
    entries = []
    # Exact matches trump
    for entry in self.entries:
        if entry.title == title:
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Case insensitive matches next.
    title_lower = title.lower()
    for entry in self.entries:
        # Fix: reuse the precomputed title_lower instead of re-lowering
        # `title` on every iteration (it was computed but unused here).
        if entry.title.lower() == title_lower:
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Subsequence/prefix matches next.
    for entry in self.entries:
        if self._is_subsequence(title_lower, entry.title.lower()):
            entries.append(entry)
    if entries:
        return self._filter_entries(entries, ignore_groups)
    # Finally close matches that might have mispellings.
    entry_map = {entry.title.lower(): entry for entry in self.entries}
    matches = difflib.get_close_matches(
        title_lower, entry_map.keys(), cutoff=0.7)
    if matches:
        return self._filter_entries(
            [entry_map[name] for name in matches], ignore_groups)
    return []
https://github.com/jamesls/python-keepassx/blob/cf3c8f33b17b8eb6beaa1a8dd83ce1921dcde975/keepassx/db.py#L332-L376
fuzzy match ranking
python
def fuzzy(cls, field, value, boost=None, min_similarity=None, prefix_length=None):
    '''
    Build a fuzzy query: similarity based on the Levenshtein (edit
    distance) algorithm.

    http://www.elasticsearch.org/guide/reference/query-dsl/fuzzy-query.html

    Optional tuning parameters (boost, min_similarity, prefix_length) are
    only included when explicitly given.
    '''
    params = {'value': value}
    for option, setting in (('boost', boost),
                            ('min_similarity', min_similarity),
                            ('prefix_length', prefix_length)):
        if setting is not None:
            params[option] = setting
    return cls(fuzzy={field: params})
https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/query.py#L156-L168
fuzzy match ranking
python
def fuzzy_match(image, template, normed_tolerance=None, raw_tolerance=None, method='correlation'):
    """Fuzzy template match: return the aggregated (mean) positions of the
    regions of `image` that match `template`.

    Use this when the match may not be pixel-exact, e.g. the same image
    rendered with different anti-aliasing, or on different backgrounds.

    :param method: 'correlation' (default), 'correlation coefficient'
        (use if you get too many false positives; can also be more robust
        for inexact matches) or 'squared difference'.
    :param raw_tolerance: proportion of the exact-match score that still
        counts as a match.  N.B. lowering it DRAMATICALLY slows matching,
        since more candidate tiles need normalisation.
    :param normed_tolerance: how far a potential match value may differ
        from one after normalisation.  Defaults (from a short empirical
        investigation): 0.95, except 0.05 for 'squared difference'.
    :raises ValueError: for an unknown `method`.
    """
    # Default raw tolerance is the same for every method.
    if not raw_tolerance:
        raw_tolerance = 0.95
    if method == 'correlation':
        if not normed_tolerance:
            normed_tolerance = 0.95
        results = np.array(match_via_correlation(
            image, template, raw_tolerance=raw_tolerance,
            normed_tolerance=normed_tolerance))
    elif method == 'correlation coefficient':
        if not normed_tolerance:
            normed_tolerance = 0.95
        results = np.array(match_via_correlation_coefficient(
            image, template, raw_tolerance=raw_tolerance,
            normed_tolerance=normed_tolerance))
    elif method == 'squared difference':
        if not normed_tolerance:
            normed_tolerance = 0.05
        results = np.array(match_via_squared_difference(
            image, template, raw_tolerance=raw_tolerance,
            sq_diff_tolerance=normed_tolerance))
    else:
        # Fix: an unknown method previously fell through and crashed below
        # with NameError on `results`; fail fast with a clear error.
        raise ValueError("unknown matching method: %r" % (method,))
    h, w = image.shape
    # Keep only the (row, col) coordinates of each raw result.
    results = np.array([(result[0], result[1]) for result in results])
    results_aggregated_mean_match_position = match_positions((h, w), results)
    return results_aggregated_mean_match_position
https://github.com/ten10solutions/Geist/blob/a1ef16d8b4c3777735008b671a50acfde3ce7bf1/geist/matchers.py#L64-L110
fuzzy match ranking
python
def query(self, w, ed=1):  # Can only handle ed=1
    """ Finds the fuzzy matches (within edit distance 1) of w from words """
    assert ed <= self._ed
    if ed == 0:
        # Exact lookup only; return [''] as the "no match" sentinel.
        return [w] if w in self._L else ['']
    w = str(w)
    n = len(w)
    # Any string within edit distance 1 of w shares either w's first half
    # verbatim or w's second half verbatim, so query the forward trie with
    # the prefix and the reversed trie with the reversed suffix.
    prefix, suffix = w[:n // 2], w[n // 2:][::-1]
    options_w_prefix = self._L.keys(prefix)
    # NOTE(review): `iterkeys` is the Python 2 trie API -- confirm whether
    # this module still targets Python 2.
    options_w_suffix = [x[::-1] for x in self._R.iterkeys(suffix)]
    # Deduplicate the candidates, then keep only true edit-distance-1
    # matches (length pre-filter avoids needless distance computations).
    return [
        _w
        for _w in set(itertools.chain(options_w_prefix, options_w_suffix))
        if abs(len(_w) - len(w)) <= 1 and lvdistance(str(_w), str(w), 1) <= 1
    ]
https://github.com/rchatterjee/pwmodels/blob/e277411f8ebaf4ad1c208d2b035b4b68f7471517/src/pwmodel/fast_fuzzysearch.py#L107-L123
fuzzy match ranking
python
def get_similar(obj, labels, default=None, min_similarity=0.5):
    """Similar to fuzzy_get, but allows non-string keys and a list of possible keys

    Searches attributes in addition to keys and indexes to find the closest match.

    See Also:
        `fuzzy_get`
    """
    # NOTE(review): this function raises unconditionally -- everything after
    # the raise is unreachable draft code the author kept as a sketch.
    raise NotImplementedError(
        "Unfinished implementation, needs to be in fuzzy_get where list of scores & keywords is sorted.")
    labels = listify(labels)

    # Sentinel callable: distinguishes "not found" from a falsy stored value.
    def not_found(*args, **kwargs):
        return 0
    min_score = int(min_similarity * 100)
    for similarity_score in [100, 95, 90, 80, 70, 50, 30, 10, 5, 0]:
        # Clamp the descending score ladder at the requested minimum.
        if similarity_score <= min_score:
            similarity_score = min_score
        for label in labels:
            try:
                # Mapping-style lookup first ...
                result = obj.get(label, not_found)
            except AttributeError:
                try:
                    # ... then sequence-style indexing.
                    result = obj.__getitem__(label)
                except (IndexError, TypeError):
                    result = not_found
            if result is not not_found:
                return result
        if similarity_score == min_score:
            if result is not not_found:
                return result
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1094-L1126
fuzzy match ranking
python
def best_match_from_list(item, options, fuzzy=90, fname_match=True, fuzzy_fragment=None, guess=False):
    '''Return the top-ranked result of :meth:`matches_from_list`, or ``None``
    when there are no good matches.'''
    ranked = matches_from_list(item, options, fuzzy, fname_match, fuzzy_fragment, guess)
    return ranked[0] if ranked else None
https://github.com/azraq27/gini/blob/3c2b5265d096d606b303bfe25ac9adb74b8cee14/gini/matching.py#L56-L61
fuzzy match ranking
python
def fuzzy_index_match(possiblities, label, **kwargs):
    """Find the closest matching column label, key, or integer indexed value

    Returns:
      type(label): sequence of immutable objects corresponding to best matches to each object in label
        if label is an int returns the object (value) in the list of possibilities at that index
        if label is a str returns the closest str match in possibilities

    >>> from collections import OrderedDict as odict
    >>> fuzzy_index_match(pd.DataFrame(pd.np.random.randn(9,4), columns=list('ABCD'), index=range(9)), 'b')
    'B'
    >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 'r2d2')
    '2'
    >>> fuzzy_index_match(odict(zip('12345','ABCDE')), 1)
    '2'
    >>> fuzzy_index_match(odict(zip('12345','ABCDE')), -1)
    '5'
    >>> fuzzy_index_match(odict(zip(range(4),'FOUR')), -4)
    0
    """
    options = list(possiblities)
    # Strings get fuzzy string matching, ints are treated as positions,
    # and a list is matched element by element.
    if isinstance(label, basestring):
        return fuzzy_get(options, label, **kwargs)
    if isinstance(label, int):
        return options[label]
    if isinstance(label, list):
        matched = []
        for candidate in label:
            matched.append(fuzzy_get(options, candidate))
        return matched
https://github.com/hobson/pug-invest/blob/836911258a0e920083a88c91beae88eefdebb20c/pug/invest/util.py#L1008-L1034
fuzzy match ranking
python
def fuzzy_search(self, *filters):
    """
    Perform a "fuzzy" search that matches the given characters in the given order.

    :param filters: The pattern(s) to search for.
    :returns: The matched password names (a list of strings).
    """
    logger.verbose(
        "Performing fuzzy search on %s (%s) ..",
        pluralize(len(filters), "pattern"),
        concatenate(map(repr, filters)),
    )
    # Compile every filter into a fuzzy regex once, up front.
    patterns = [create_fuzzy_pattern(keyword) for keyword in filters]
    # An entry matches only when every pattern matches its name.
    matches = [
        entry
        for entry in self.filtered_entries
        if all(pattern.search(entry.name) for pattern in patterns)
    ]
    logger.log(
        logging.INFO if matches else logging.VERBOSE,
        "Matched %s using fuzzy search.",
        pluralize(len(matches), "password"),
    )
    return matches
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L110-L130
fuzzy match ranking
python
def parse_matchup(self):
    """
    Parse the banner matchup meta info for the game.

    :returns: ``self`` on success or ``None``
    """
    lx_doc = self.html_doc()
    try:
        # Only parse once; keep any previously filled matchup.
        if not self.matchup:
            self.matchup = self._fill_meta(lx_doc)
        return self
    except Exception:
        # Fix: the bare `except:` here also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary exceptions while keeping
        # the None-on-failure contract.
        return None
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/reportloader.py#L74-L86
fuzzy match ranking
python
def fuzzmatch(self, fuzzkey, multi=False):
    """
    Identify a filter by fuzzy string matching.

    Partial ('fuzzy') matching performed by `seqm(...).ratio()`
    (presumably difflib.SequenceMatcher -- confirm the alias).

    Parameters
    ----------
    fuzzkey : str
        A string that partially matches one filter name more than the others.
    multi : bool
        If True, return every equally-best-matching filter name.

    Returns
    -------
    The name of the most closely matched filter. : str

    Raises
    ------
    ValueError
        If two or more filter names match equally well (and multi is False).
    """
    keys = np.array(list(self.components.keys()))
    # Fix: the original packed (name, ratio) pairs into a single np.array,
    # which coerced the ratios to strings, so `max` and the comparisons
    # below were lexicographic, not numeric.  Keep the scores as floats.
    ratios = np.array([seqm(None, fuzzkey, f).ratio() for f in keys])
    mratio = ratios.max()
    if multi:
        return keys[ratios == mratio]
    else:
        if sum(ratios == mratio) == 1:
            return keys[ratios == mratio][0]
        else:
            raise ValueError("\nThe filter key provided ('{:}') matches two or more filter names equally well:\n".format(fuzzkey) + ', '.join(keys[ratios == mratio]) + "\nPlease be more specific!")
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/filt_obj.py#L319-L344
fuzzy match ranking
python
def _exact_match(response, matches, insensitive, fuzzy): ''' returns an exact match, if it exists, given parameters for the match ''' for match in matches: if response == match: return match elif insensitive and response.lower() == match.lower(): return match elif fuzzy and _exact_fuzzy_match(response, match, insensitive): return match else: return None
https://github.com/toejough/pimento/blob/cdb00a93976733aa5521f8504152cedeedfc711a/pimento/__init__.py#L298-L311
fuzzy match ranking
python
def matches_from_list(item, options, fuzzy=90, fname_match=True, fuzzy_fragment=None, guess=False):
    '''Returns the members of ``options`` that best matches ``item``. Will prioritize
    exact matches, then filename-style matching, then fuzzy matching.
    Returns a tuple of item, index, match type, and fuzziness (if applicable)

    :item: string to match
    :options: list of examples to test against
    :fuzzy: integer (out of 100) describing how close to match string
    :fname_match: use filename globbing to match files?
    :fuzzy_fragment: if not ``None``, will accept substring matches of at least
                     ``fuzzy_fragment`` fuzziness
    :guess: if ``True``, shortcut for setting ``fuzzy`` and ``min_fragment`` to
            very lenient options
    '''
    matches = []
    if guess:
        fuzzy = min(fuzzy, 80)
        # NOTE(review): min(None, 70) is only legal on Python 2 (where it
        # evaluates to None), so `guess` never actually enables fragment
        # matching unless a fuzzy_fragment was already given -- confirm intent.
        fuzzy_fragment = min(fuzzy_fragment, 70)

    # True when `item` is not already recorded as a matched option.
    option_not_in = lambda item, match_list: all([x[0] != item for x in match_list])

    # Exact matches.
    # NOTE(review): the gate is case-sensitive (`item in options`) but the
    # list comprehension below compares case-insensitively, so e.g. 'A'
    # never exact-matches options ['a'] -- confirm this asymmetry is wanted.
    if item in options:
        matches += [(options[i], i, 'exact', None)
                    for i in xrange(len(options))  # xrange: Python 2 module
                    if options[i].lower() == item.lower()]
        # If we have exact matches, don't bother with fuzzy matching
        return matches

    # Filename-style (glob) matches.
    if fname_match:
        matches += [(x, options.index(x), 'fname', None)
                    for x in fnmatch.filter(options, item)
                    if option_not_in(x, matches)]

    # Fuzzy matches via fuzzywuzzy's full-string ratio, appended in
    # ascending score order.
    if fuzzy:
        sub_matches = []
        for i in xrange(len(options)):
            r = fuzz.ratio(item.lower(), options[i].lower())
            if r >= fuzzy and option_not_in(options[i], matches):
                sub_matches.append((r, i))
        matches += [(options[x[1]], x[1], 'fuzzy', x[0]) for x in sorted(sub_matches)]

    # Substring-fragment matches via partial_ratio, same ordering.
    if fuzzy_fragment:
        sub_matches = []
        for i in xrange(len(options)):
            r = fuzz.partial_ratio(item.lower(), options[i].lower())
            if r >= fuzzy_fragment and option_not_in(options[i], matches):
                sub_matches.append((r, i))
        matches += [(options[x[1]], x[1], 'fuzzy_fragment', x[0]) for x in sorted(sub_matches)]

    return matches
https://github.com/azraq27/gini/blob/3c2b5265d096d606b303bfe25ac9adb74b8cee14/gini/matching.py#L4-L54
fuzzy match ranking
python
def best_match(self, matches):
    """
    Find the most similar string to self.target.

    Given a list of candidate strings find the closest match to
    self.target, returning the best match with a score indicating
    closeness of match.

    :param matches: A list of candidate matches
    :returns: A tuple of (score, best_match)
    """
    best_match = None
    for match, message in matches:
        self.matcher.set_seq1(message)
        # quick_ratio() is a cheap upper bound on ratio(); only pay for the
        # expensive full ratio() when the bound could beat the current best.
        ratio = self.matcher.quick_ratio()
        if best_match is None or ratio >= best_match[0]:
            new_ratio = self.matcher.ratio()
            # Strict > keeps the earliest candidate on exact ties.
            if best_match is None or new_ratio > best_match[0]:
                best_match = (new_ratio, match)
    return best_match
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/autoclassify/matchers.py#L186-L205
fuzzy match ranking
python
def best_match(self, matches, default=None):
    """Returns the best match from a list of possible matches based
    on the quality of the client.  If two items have the same quality,
    the one is returned that comes first.

    :param matches: a list of matches to check for
    :param default: the value that is returned if none match
    """
    if matches:
        best_quality = -1
        result = default
        # Iterating self yields (client_item, quality) preference pairs.
        for client_item, quality in self:
            for server_item in matches:
                # quality is constant within this inner loop, so once it
                # can no longer beat the best already found, skip the rest
                # of this client item entirely.
                if quality <= best_quality:
                    break
                if self._value_matches(server_item, client_item):
                    best_quality = quality
                    result = server_item
        return result
    else:
        # No server-side candidates: fall back to the client's own best.
        return self.best
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/structures.py#L121-L141
fuzzy match ranking
python
def best_match(self, target, choices):
    """Return the best match for *target* among *choices*, or ``None``
    when the match generator yields nothing."""
    # all_matches yields matches best-first; the first one is the winner.
    matcher = self.all_matches
    try:
        return next(matcher(target, choices, group=False))
    except StopIteration:
        return None
https://github.com/chrlie/howabout/blob/780cacbdd9156106cc77f643c75191a824b034bb/src/howabout/__init__.py#L130-L141
fuzzy match ranking
python
def match(column, term, match_type=None, options=None):
    """Build a match predicate for fulltext search.

    :param column: A reference to a column or an index, or a subcolumn, or a
     dictionary of subcolumns with boost values.

    :param term: The term to match against. This string is analyzed and the
     resulting tokens are compared to the index.

    :param match_type (optional): The match type. Determines how the term is
     applied and the score calculated.

    :param options (optional): The match options, a dictionary specifying
     match type behaviour. (Not possible without a specified match type.)
    """
    predicate = Match(column, term, match_type, options)
    return predicate
https://github.com/crate/crate-python/blob/68e39c95f5bbe88b74bbfa26de4347fc644636a8/src/crate/client/sqlalchemy/predicates/__init__.py#L70-L86
fuzzy match ranking
python
def _parse_ranking(self, field, boxscore):
    """
    Parse each team's rank if applicable.

    Retrieve the team's rank according to the rankings published each
    week. The ranking for the week is only located in the scores section
    at the top of the page and not in the actual boxscore information.
    The rank is after the team name inside a parenthesis with a special
    'pollrank' attribute. If this is not in the team's boxscore
    information, the team is assumed to not have a rank and will return a
    value of None.

    Parameters
    ----------
    field : string
        The name of the attribute to parse.
    boxscore : PyQuery object
        A PyQuery obejct containing all of the HTML data from the boxscore.

    Returns
    -------
    int
        An int representing the team's ranking or None if the team is not
        ranked.
    """
    ranking = None
    index = BOXSCORE_ELEMENT_INDEX[field]
    teams_boxscore = boxscore(BOXSCORE_SCHEME[field])
    # Occasionally, the list of boxscores for the day won't be saved on the
    # page. If that's the case, return the default ranking.
    if str(teams_boxscore) == '':
        return ranking
    team = pq(teams_boxscore[index])
    # A ranked team carries a 'pollrank' attribute; the rank itself is the
    # parenthesized number after the team name, e.g. "(12)".
    if 'pollrank' in str(team):
        rank_str = re.findall(r'\(\d+\)', str(team))
        # Only trust an unambiguous, single parenthesized number.
        if len(rank_str) == 1:
            ranking = int(rank_str[0].replace('(', '').replace(')', ''))
    return ranking
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaab/boxscore.py#L311-L348
fuzzy match ranking
python
def best_match(self, seqs, scan_rc=True):
    """
    Yield, per sequence, the single best match of each motif as
    (score, position, strand) tuples (one nested list per sequence).
    """
    # Rank every motif hit, so disable score thresholding first.
    self.set_threshold(threshold=0.0)
    for per_motif_hits in self.scan(seqs, 1, scan_rc):
        best = []
        for hits in per_motif_hits:
            best.append(hits[0])
        yield best
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L550-L558
fuzzy match ranking
python
def match(self, key=None, year=None, event=None, type='qm', number=None, round=None, simple=False):
    """
    Get data on a match.

    You may either pass the match's key directly, or pass `year`, `event`, `type`, `match` (the match number), and `round` if applicable (playoffs only). The event year may be specified as part of the event key or specified in the `year` parameter.

    :param key: Key of match to get data on. First option for specifying a match (see above).
    :param year: Year in which match took place. Optional; if excluded then must be included in event key.
    :param event: Key of event in which match took place. Including year is optional; if excluded then must be specified in `year` parameter.
    :param type: One of 'qm' (qualifier match), 'qf' (quarterfinal), 'sf' (semifinal), 'f' (final). If unspecified, 'qm' will be assumed.
    :param number: Match number. For example, for qualifier 32, you'd pass 32. For Semifinal 2 round 3, you'd pass 2.
    :param round: For playoff matches, you will need to specify a round.
    :param simple: Get only vital data.
    :return: A single Match object.
    """
    # Direct key lookup takes priority over assembling a key from parts.
    if key:
        return Match(self._get('match/%s%s' % (key, '/simple' if simple else '')))
    else:
        # Assemble the key as <year><event>_<type><number>[m<round>].
        # Year is dropped when the event key already starts with one.
        # NOTE(review): this branch assumes `event` was provided --
        # event[0] raises TypeError when event is None; confirm callers.
        return Match(self._get('match/{year}{event}_{type}{number}{round}{simple}'.format(
            year=year if not event[0].isdigit() else '',
            event=event,
            type=type,
            number=number,
            round=('m%s' % round) if not type == 'qm' else '',
            simple='/simple' if simple else '')))
https://github.com/frc1418/tbapy/blob/3866d5a9971fe3dfaf1a1d83638bd6be6070f0c4/tbapy/main.py#L354-L377
fuzzy match ranking
python
def _score(cluster): """ score of the cluster, in this case, is the number of non-repetitive matches """ x, y = zip(*cluster)[:2] return min(len(set(x)), len(set(y)))
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/synteny.py#L256-L261
fuzzy match ranking
python
def _match(self): """Find all matches and generate a position group for each match.""" #disable optimized matching optimized_rows = None optimized_columns = None for match in self.__match_rows(optimized_rows): #match in rows yield match for match in self.__match_rows(optimized_columns, transpose=True): #match in columns and transpose coordinates yield match
https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/base.py#L755-L766
fuzzy match ranking
python
def match(self, fname, flevel, ftype):
    '''Return this rule's score when the file matches it, otherwise 0.'''
    # The rule only applies to its own file type.
    if self.filetype != ftype:
        return 0
    # A rule bound to a level only matches files at that level.
    if self.level is not None and self.level != flevel:
        return 0
    # Finally, the filename must match the rule's glob pattern.
    if fnmatch.fnmatch(fname, self.pattern):
        return self.score
    return 0
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/management/commands/dmp_collectstatic.py#L31-L38
fuzzy match ranking
python
def findAllMatches(self, needle, similarity):
    """ Finds all matches above ``similarity`` using a search pyramid to improve efficiency

    Pyramid implementation unashamedly stolen from https://github.com/stb-tester/stb-tester

    NOTE: destructive -- each found match is erased (zeroed) from
    ``self.haystack`` so the next-best match can be found.
    """
    positions = []
    # Use findBestMatch to get the best match
    while True:
        best_match = self.findBestMatch(needle, similarity)
        if best_match is None:
            # No more matches
            break
        # Found a match. Add it to our list
        positions.append(best_match) # (position, confidence)
        # Erase the found match from the haystack.
        # Repeat this process until no other matches are found
        x, y = best_match[0]
        w = needle.shape[1]
        h = needle.shape[0]
        roi = (x, y, w, h)
        # numpy 2D slice covering the matched region (rows, then cols)
        roi_slice = (slice(roi[1], roi[1]+roi[3]), slice(roi[0], roi[0]+roi[2]))
        self.haystack[roi_slice] = 0
        # Whew! Let's see if there's a match after all that.
    # Sort matches top-to-bottom, then left-to-right (by y, then x).
    positions.sort(key=lambda x: (x[0][1], x[0][0]))
    return positions
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/TemplateMatchers.py#L218-L244
fuzzy match ranking
python
def sort_matches(matches):
    '''Return the given ``list`` of matches ordered best to worst.'''
    # Match-type weights: exact beats filename beats fuzzy beats fragment;
    # within a type, a higher fuzziness amount ranks higher.
    weights = {'exact': 10**5, 'fname': 10**4, 'fuzzy': 10**2, 'fuzzy_fragment': 1}
    decorated = []
    for match in matches:
        amount = match.amount if match.amount else 1
        decorated.append((weights[match.type] * amount, match))
    decorated.sort(reverse=True)
    return [pair[1] for pair in decorated]
https://github.com/azraq27/gini/blob/3c2b5265d096d606b303bfe25ac9adb74b8cee14/gini/semantics.py#L29-L33
fuzzy match ranking
python
def match(self, selector, index):
    """Match the selector.

    Try the pseudo-class name pattern at `index`; when a known pseudo-class
    is found, delegate to its registered pattern.  Returns the sub-match or
    None.
    """
    pseudo = None
    m = self.re_pseudo_name.match(selector, index)
    if m:
        # Normalize the pseudo-class name before looking up its pattern.
        name = util.lower(css_unescape(m.group('name')))
        pattern = self.patterns.get(name)
        if pattern:
            pseudo = pattern.match(selector, index)
            if pseudo:
                # Side effect: remember which pattern matched so the caller
                # can inspect it.
                self.matched_name = pattern
    return pseudo
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/css_parser.py#L348-L361
fuzzy match ranking
python
def _match_cubes(ccube_clean, ccube_dirty, bexpcube_clean, bexpcube_dirty, hpx_order): """ Match the HEALPIX scheme and order of all the input cubes return a dictionary of cubes with the same HEALPIX scheme and order """ if hpx_order == ccube_clean.hpx.order: ccube_clean_at_order = ccube_clean else: ccube_clean_at_order = ccube_clean.ud_grade(hpx_order, preserve_counts=True) if hpx_order == ccube_dirty.hpx.order: ccube_dirty_at_order = ccube_dirty else: ccube_dirty_at_order = ccube_dirty.ud_grade(hpx_order, preserve_counts=True) if hpx_order == bexpcube_clean.hpx.order: bexpcube_clean_at_order = bexpcube_clean else: bexpcube_clean_at_order = bexpcube_clean.ud_grade(hpx_order, preserve_counts=True) if hpx_order == bexpcube_dirty.hpx.order: bexpcube_dirty_at_order = bexpcube_dirty else: bexpcube_dirty_at_order = bexpcube_dirty.ud_grade(hpx_order, preserve_counts=True) if ccube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest: ccube_dirty_at_order = ccube_dirty_at_order.swap_scheme() if bexpcube_clean_at_order.hpx.nest != ccube_clean.hpx.nest: bexpcube_clean_at_order = bexpcube_clean_at_order.swap_scheme() if bexpcube_dirty_at_order.hpx.nest != ccube_clean.hpx.nest: bexpcube_dirty_at_order = bexpcube_dirty_at_order.swap_scheme() ret_dict = dict(ccube_clean=ccube_clean_at_order, ccube_dirty=ccube_dirty_at_order, bexpcube_clean=bexpcube_clean_at_order, bexpcube_dirty=bexpcube_dirty_at_order) return ret_dict
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/diffuse/residual_cr.py#L62-L102
fuzzy match ranking
python
async def get_final_ranking(self) -> OrderedDict:
    """ Get the ordered players ranking

    Returns:
        collections.OrderedDict[rank, List[Participant]]:

    Raises:
        APIException
    """
    # Rankings only exist once the tournament has finished.
    if self._state != TournamentState.complete.value:
        return None
    # Group participants by their final rank.
    by_rank = {}
    for participant in self.participants:
        by_rank.setdefault(participant.final_rank, []).append(participant)
    # Best placement (lowest rank number) first.
    return OrderedDict(sorted(by_rank.items(), key=lambda item: item[0]))
https://github.com/fp12/achallonge/blob/25780b3c48b66400a50ff9f884e4287afd4c89e4/challonge/tournament.py#L763-L783
fuzzy match ranking
python
def _compare_frame_rankings(ref, est, transitive=False):
    '''Compute the number of ranking disagreements in two lists.

    Parameters
    ----------
    ref : np.ndarray, shape=(n,)
    est : np.ndarray, shape=(n,)
        Reference and estimate ranked lists.
        `ref[i]` is the relevance score for point `i`.
    transitive : bool
        If true, all pairs of reference levels are compared.
        If false, only adjacent pairs of reference levels are compared.

    Returns
    -------
    inversions : int
        The number of pairs of indices `i, j` where
        `ref[i] < ref[j]` but `est[i] >= est[j]`.

    normalizer : float
        The total number of pairs (i, j) under consideration.
        If transitive=True, then this is |{(i,j) : ref[i] < ref[j]}|
        If transitive=False, then this is |{i,j) : ref[i] +1 = ref[j]}|
    '''
    # Sort both lists by the reference scores so each reference level
    # occupies a contiguous span.
    idx = np.argsort(ref)
    ref_sorted = ref[idx]
    est_sorted = est[idx]
    # Find the break-points in ref_sorted
    levels, positions, counts = np.unique(ref_sorted,
                                          return_index=True,
                                          return_counts=True)
    positions = list(positions)
    positions.append(len(ref_sorted))
    # Defaults make missing levels harmless: an empty slice and a count of
    # 0, so non-existent adjacent levels contribute nothing below.
    index = collections.defaultdict(lambda: slice(0))
    ref_map = collections.defaultdict(lambda: 0)
    for level, cnt, start, end in zip(levels, counts,
                                      positions[:-1], positions[1:]):
        index[level] = slice(start, end)
        ref_map[level] = cnt
    # Now that we have values sorted, apply the inversion-counter to
    # pairs of reference values
    if transitive:
        level_pairs = itertools.combinations(levels, 2)
    else:
        # Adjacent reference levels only (level value + 1).
        level_pairs = [(i, i+1) for i in levels]
    # tee() because the pair stream is consumed twice: once for the
    # normalizer, once for the inversion count.
    level_pairs, lcounter = itertools.tee(level_pairs)
    normalizer = float(sum([ref_map[i] * ref_map[j] for (i, j) in lcounter]))
    if normalizer == 0:
        return 0, 0.0
    inversions = 0
    for level_1, level_2 in level_pairs:
        inversions += _count_inversions(est_sorted[index[level_1]],
                                        est_sorted[index[level_2]])
    return inversions, float(normalizer)
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/hierarchy.py#L370-L436
fuzzy match ranking
python
def bestscore(self, seq, fwd=''):
    """Return the score of the best matching subsequence in seq, or -1000
    when the scan produces no scores."""
    # Threshold of -100000 effectively disables filtering, so every
    # subsequence is scored.
    matches, endpoints, scores = self._scan(seq, threshold=-100000, forw_only=fwd)
    return max(scores) if scores else -1000
https://github.com/lucapinello/Haystack/blob/cc080d741f36cd77b07c0b59d08ea6a4cf0ef2f7/haystack/external.py#L800-L806
fuzzy match ranking
python
def rank(self, X, algorithm=None):
    """
    Returns the feature ranking.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features

    algorithm : str or None
        The ranking mechanism to use, or None for the default

    Returns
    -------
    ranks : ndarray
        An n-dimensional, symmetric array of rank scores, where n is the
        number of features. E.g. for 1D ranking, it is (n,), for a
        2D ranking it is (n,n) and so forth.
    """
    # Fall back to the configured default and normalize the name.
    method = (algorithm or self.ranking_).lower()
    if method not in self.ranking_methods:
        raise YellowbrickValueError(
            "'{}' is unrecognized ranking method".format(method)
        )
    # Extract matrix from dataframe if necessary
    if is_dataframe(X):
        X = X.values
    return self.ranking_methods[method](X)
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/features/rankd.py#L148-L179
fuzzy match ranking
python
def order_by(self, *orderings: str) -> "QuerySet":
    """
    Accept args to filter by in format like this:

    .. code-block:: python3

        .order_by('name', '-tournament__name')

    Supports ordering by related models too.
    """
    queryset = self._clone()
    new_ordering = []
    for ordering in orderings:
        # Fix: indexing ordering[0] raised IndexError on an empty string;
        # startswith handles it safely (an empty name then fails the field
        # check below with a proper FieldError) and reads more clearly.
        if ordering.startswith("-"):
            field_name = ordering[1:]
            order_type = Order.desc
        else:
            field_name = ordering
            order_type = Order.asc

        # The first path segment must be a model field (related orderings
        # like 'tournament__name' are validated on their root) or a
        # declared annotation.
        if not (
            field_name.split("__")[0] in self.model._meta.fields
            or field_name in self._annotations
        ):
            raise FieldError(
                "Unknown field {} for model {}".format(field_name, self.model.__name__)
            )
        new_ordering.append((field_name, order_type))
    queryset._orderings = new_ordering
    return queryset
https://github.com/tortoise/tortoise-orm/blob/7d16457731905e19d4d06ccd5b4ea16d4a9447b2/tortoise/queryset.py#L187-L216
fuzzy match ranking
python
async def get_next_match(self):
    """ Return the first open match found, or if none, the first pending match found

    |methcoro|

    Raises:
        APIException
    """
    # A participant with a final rank is done with the tournament.
    if self._final_rank is not None:
        return None
    # Prefer open matches; fall back to pending ones.
    candidates = await self.get_matches(MatchState.open_)
    if not candidates:
        candidates = await self.get_matches(MatchState.pending)
    return candidates[0] if candidates else None
https://github.com/fp12/achallonge/blob/25780b3c48b66400a50ff9f884e4287afd4c89e4/challonge/participant.py#L172-L192
fuzzy match ranking
python
def _match(self, struct1, struct2, fu, s1_supercell=True, use_rms=False,
           break_on_match=False):
    """
    Matches one struct onto the other
    """
    # When struct1 is the supercell its size is scaled up by fu,
    # otherwise the supercell factor divides it.
    ratio = fu if s1_supercell else 1 / fu

    # Always hand the larger (scaled) structure to the strict matcher
    # first, flipping the supercell flag when the order is swapped.
    if len(struct1) * ratio >= len(struct2):
        return self._strict_match(
            struct1, struct2, fu, s1_supercell=s1_supercell,
            break_on_match=break_on_match, use_rms=use_rms)
    return self._strict_match(
        struct2, struct1, fu, s1_supercell=(not s1_supercell),
        break_on_match=break_on_match, use_rms=use_rms)
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/structure_matcher.py#L648-L661
fuzzy match ranking
python
def rank(self, method='ordinal', ascending=True, mask=NotSpecified, groupby=NotSpecified):
    """
    Construct a new Factor representing the sorted rank of each column
    within each row.

    Parameters
    ----------
    method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
        How ties are ranked; see `scipy.stats.rankdata` for the full
        semantics. Default is 'ordinal' — note this differs from
        scipy's own default.
    ascending : bool, optional
        Whether to return sorted rank in ascending or descending order.
        Default is True.
    mask : zipline.pipeline.Filter, optional
        A Filter representing assets to consider when computing ranks.
        Asset/date pairs where `mask` is False are ignored.
    groupby : zipline.pipeline.Classifier, optional
        A classifier defining partitions over which to perform ranking.

    Returns
    -------
    ranks : zipline.pipeline.factors.Rank
        A new factor that will compute the ranking of the data produced
        by `self`. Missing or non-existent data on a given day gives an
        asset a NaN rank for that day.

    See Also
    --------
    :func:`scipy.stats.rankdata`
    :class:`zipline.pipeline.factors.factor.Rank`
    """
    # Ungrouped ranking is handled by the dedicated Rank factor.
    if groupby is NotSpecified:
        return Rank(self, method=method, ascending=ascending, mask=mask)

    # Grouped ranking is expressed as a row transform applied per
    # partition of the classifier.
    return GroupedRowTransform(
        transform=rankdata if ascending else rankdata_1d_descending,
        transform_args=(method,),
        factor=self,
        groupby=groupby,
        dtype=float64_dtype,
        missing_value=nan,
        mask=mask,
        window_safe=True,
    )
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L593-L651
fuzzy match ranking
python
def score(args):
    """
    %prog score blastfile query.fasta A.ids

    Add up the scores for each query seq. Go through the lines and for each
    query sequence, add up the scores when subject is in each pile by A.ids.
    """
    from jcvi.formats.base import SetFile
    from jcvi.formats.fasta import Fasta

    p = OptionParser(score.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    blastfile, fastafile, idsfile = args
    ids = SetFile(idsfile)

    # Accumulate scores only for hits whose subject belongs to the pile.
    scores = defaultdict(int)
    for b in Blast(blastfile):
        if b.subject not in ids:
            continue
        scores[b.query] += b.score

    logging.debug("A total of {0} ids loaded.".format(len(ids)))

    # Emit one line per query sequence, in FASTA order, defaulting to 0.
    for s in Fasta(fastafile).iterkeys_ordered():
        print("\t".join((s, str(scores.get(s, 0)))))
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/blast.py#L444-L477
fuzzy match ranking
python
async def rank(self):
    """
    Try to find a choice in what the user did:

    - If there is a quick reply, then use its payload as choice slug
    - Otherwise, try to match each choice with its intent
    """
    from bernard.platforms.facebook import layers as fbl

    choices = self.request.get_trans_reg('choices')
    if not choices:
        return

    # Quick replies carry the chosen slug directly; free text needs
    # intent matching instead.
    if self.request.has_layer(fbl.QuickReply):
        return self._rank_qr(choices)
    if self.request.has_layer(l.RawText):
        return await self._rank_text(choices)
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/triggers.py#L246-L263
fuzzy match ranking
python
def _best_match_syn(self, sx, sys, scope_map):
    """
    Return the best-matching synonym for ``sx`` among the candidates ``sys``.

    The best match is determined by the highest magnitude weight.

    Parameters
    ----------
    sx : synonym object to match
    sys : iterable of candidate synonym objects
    scope_map : nested mapping (scope -> scope -> base weight vector)

    Returns
    -------
    (WBEST, sbest)
        The winning weight vector and synonym, or (None, None) when no
        candidate matched at all.
    """
    SUBSTRING_WEIGHT = 0.2
    WBEST = None
    sbest = None
    sxv = self._standardize_label(sx.val)
    sxp = self._id_to_ontology(sx.class_id)
    for sy in sys:
        syv = self._standardize_label(sy.val)
        syp = self._id_to_ontology(sy.class_id)
        W = None
        if sxv == syv:
            confidence = sx.confidence * sy.confidence
            # BUGFIX: ``sy.is_abbreviation`` was previously referenced
            # without calling it; the bound method is always truthy, so
            # the abbreviation penalty was applied to EVERY exact match.
            if sx.is_abbreviation() or sy.is_abbreviation():
                confidence *= self._get_config_val(sxp, 'abbreviation_confidence', 0.5)
                confidence *= self._get_config_val(syp, 'abbreviation_confidence', 0.5)
            W = scope_map[sx.scope()][sy.scope()] + logit(confidence/2)
        elif sxv in syv:
            W = np.array((-SUBSTRING_WEIGHT, SUBSTRING_WEIGHT, 0, 0))
        elif syv in sxv:
            W = np.array((SUBSTRING_WEIGHT, -SUBSTRING_WEIGHT, 0, 0))
        if W is not None:
            # The best match is determined by the highest magnitude weight
            if WBEST is None or max(abs(W)) > max(abs(WBEST)):
                WBEST = W
                sbest = sy
    return WBEST, sbest
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L501-L529
fuzzy match ranking
python
def rank(self, n, mu, sigma, crit=.5, upper=10000, xtol=1):
    """%(super)s

    Additional Parameters
    ----------------------
    {0}

    """
    # Thin delegation: the shared helper performs the actual rank
    # computation with this distribution instance bound in.
    return _make_rank(self, n, mu, sigma, crit=crit, upper=upper, xtol=xtol)
https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/models/_distributions.py#L1430-L1440
fuzzy match ranking
python
def _match(self, x, op, y):
    """Compare the given `x` and `y` based on `op`

    :@param x, y, op
    :@type x, y: mixed
    :@type op: string

    :@return bool
    :@throws ValueError
    """
    # Reject operators with no registered comparator.
    if op not in self.condition_mapper:
        raise ValueError('Invalid where condition given')

    # Dispatch to the comparator method by name.
    comparator = getattr(self, self.condition_mapper.get(op))
    return comparator(x, y)
https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/matcher.py#L162-L176
fuzzy match ranking
python
def do_matching(sorting1, sorting2, delta_tp, min_accuracy):
    """
    This compute the matching between 2 sorters.

    Parameters
    ----------
    sorting1: SortingExtractor instance
        First sorting (typically the ground truth).
    sorting2: SortingExtractor instance
        Second sorting (typically the one under test).
    delta_tp: int
        Coincidence window passed to count_matching_events.
    min_accuracy: float
        Minimum agreement score for an assigned pair to be kept in the
        unit maps; pairs below it map to -1.

    Output
    ----------
    event_counts_1: dict unit -> spike count for sorting1
    event_counts_2: dict unit -> spike count for sorting2
    matching_event_counts_12: dict unit1 -> {unit2: matching event count}
    best_match_units_12: dict unit1 -> best-scoring unit2 (or -1)
    matching_event_counts_21: dict unit2 -> {unit1: matching event count}
    best_match_units_21: dict unit2 -> best-scoring unit1 (or -1)
    unit_map12: dict unit1 -> assigned unit2 (or -1)
    unit_map21: dict unit2 -> assigned unit1 (or -1)
    """
    event_counts_1 = dict()
    event_counts_2 = dict()
    matching_event_counts_12 = dict()
    best_match_units_12 = dict()
    matching_event_counts_21 = dict()
    best_match_units_21 = dict()
    unit_map12 = dict()
    unit_map21 = dict()

    unit1_ids = sorting1.get_unit_ids()
    unit2_ids = sorting2.get_unit_ids()
    N1 = len(unit1_ids)
    N2 = len(unit2_ids)

    # Compute events counts
    event_counts1 = np.zeros((N1)).astype(np.int64)
    for i1, u1 in enumerate(unit1_ids):
        times1 = sorting1.get_unit_spike_train(u1)
        event_counts1[i1] = len(times1)
        event_counts_1[u1] = len(times1)
    event_counts2 = np.zeros((N2)).astype(np.int64)
    for i2, u2 in enumerate(unit2_ids):
        times2 = sorting2.get_unit_spike_train(u2)
        event_counts2[i2] = len(times2)
        event_counts_2[u2] = len(times2)

    # Compute matching events and agreement scores for every unit pair
    matching_event_counts = np.zeros((N1, N2)).astype(np.int64)
    scores = np.zeros((N1, N2))
    for i1, u1 in enumerate(unit1_ids):
        times1 = sorting1.get_unit_spike_train(u1)
        for i2, u2 in enumerate(unit2_ids):
            times2 = sorting2.get_unit_spike_train(u2)
            num_matches = count_matching_events(times1, times2, delta=delta_tp)
            matching_event_counts[i1, i2] = num_matches
            scores[i1, i2] = compute_agreement_score(num_matches, event_counts1[i1], event_counts2[i2])

    # Find best matches for spiketrains 1
    for i1, u1 in enumerate(unit1_ids):
        scores0 = scores[i1, :]
        matching_event_counts_12[u1] = dict()
        if len(scores0) > 0:
            if np.max(scores0) > 0:
                inds0 = np.where(scores0 > 0)[0]
                for i2 in inds0:
                    matching_event_counts_12[u1][unit2_ids[i2]] = matching_event_counts[i1, i2]
                i2_best = np.argmax(scores0)
                best_match_units_12[u1] = unit2_ids[i2_best]
            else:
                best_match_units_12[u1] = -1
        else:
            best_match_units_12[u1] = -1

    # Find best matches for spiketrains 2
    for i2, u2 in enumerate(unit2_ids):
        scores0 = scores[:, i2]
        matching_event_counts_21[u2] = dict()
        if len(scores0) > 0:
            if np.max(scores0) > 0:
                inds0 = np.where(scores0 > 0)[0]
                for i1 in inds0:
                    matching_event_counts_21[u2][unit1_ids[i1]] = matching_event_counts[i1, i2]
                i1_best = np.argmax(scores0)
                best_match_units_21[u2] = unit1_ids[i1_best]
            else:
                best_match_units_21[u2] = -1
        else:
            best_match_units_21[u2] = -1

    # Assign best matches via optimal assignment (maximize total score)
    [inds1, inds2] = linear_sum_assignment(-scores)
    inds1 = list(inds1)
    inds2 = list(inds2)
    if len(unit2_ids) > 0:
        k2 = np.max(unit2_ids) + 1
    else:
        k2 = 1
    for i1, u1 in enumerate(unit1_ids):
        if i1 in inds1:
            aa = inds1.index(i1)
            i2 = inds2[aa]
            u2 = unit2_ids[i2]
            # criteria on agreement_score: only keep pairs above min_accuracy
            num_matches = matching_event_counts_12[u1].get(u2, 0)
            num1 = event_counts_1[u1]
            num2 = event_counts_2[u2]
            agree_score = compute_agreement_score(num_matches, num1, num2)
            if agree_score > min_accuracy:
                unit_map12[u1] = u2
            else:
                unit_map12[u1] = -1
        else:
            unit_map12[u1] = -1
    if len(unit1_ids) > 0:
        k1 = np.max(unit1_ids) + 1
    else:
        k1 = 1
    for i2, u2 in enumerate(unit2_ids):
        if i2 in inds2:
            aa = inds2.index(i2)
            i1 = inds1[aa]
            u1 = unit1_ids[i1]
            # criteria on agreement_score: only keep pairs above min_accuracy
            num_matches = matching_event_counts_12[u1].get(u2, 0)
            num1 = event_counts_1[u1]
            num2 = event_counts_2[u2]
            agree_score = compute_agreement_score(num_matches, num1, num2)
            if agree_score > min_accuracy:
                unit_map21[u2] = u1
            else:
                unit_map21[u2] = -1
        else:
            unit_map21[u2] = -1
    return (event_counts_1, event_counts_2,
            matching_event_counts_12, best_match_units_12,
            matching_event_counts_21, best_match_units_21,
            unit_map12, unit_map21)
https://github.com/SpikeInterface/spiketoolkit/blob/f7c054383d1ebca640966b057c087fa187955d13/spiketoolkit/comparison/comparisontools.py#L35-L190
fuzzy match ranking
python
def rank(self):
    """
    Returns an ``int`` of the team's rank at the time the game was
    played, or None when the team was unranked.
    """
    rank = re.findall(r'\d+', self._rank)
    if len(rank) == 0:
        return None
    # BUGFIX: re.findall returns strings; convert so the documented
    # ``int`` return type is actually honored.
    return int(rank[0])
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/schedule.py#L235-L242
fuzzy match ranking
python
def fuzzyfinder(text, collection):
    """https://github.com/amjith/fuzzyfinder"""
    if not isinstance(text, six.text_type):
        text = six.u(text)

    # Lazy regex matching the query characters in order with anything
    # (non-greedy) between them.
    pattern = re.compile('.*?'.join(map(re.escape, text)), flags=re.IGNORECASE)

    # Rank by (match length, match start) so tighter, earlier matches win.
    ranked = []
    for item in collection:
        m = pattern.search(item)
        if m:
            ranked.append((len(m.group()), m.start(), item))
    return (entry for _, _, entry in sorted(ranked))
https://github.com/eliangcs/http-prompt/blob/189321f25e3526fa1b79a9dc38c317892c478986/http_prompt/completer.py#L53-L65
fuzzy match ranking
python
def _rank_qr(self, choices):
    """
    Look for the QuickReply layer's slug into available choices.
    """
    from bernard.platforms.facebook import layers as fbl

    try:
        qr = self.request.get_layer(fbl.QuickReply)
        # An unknown slug raises KeyError, which means "no match".
        self.chosen = choices[qr.slug]
        self.slug = qr.slug

        # Full confidence when no constraint is set or it matches.
        if self.when is None or self.when == qr.slug:
            return 1.0
    except KeyError:
        pass
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/triggers.py#L200-L214
fuzzy match ranking
python
def fuzzy_match_tipnames(ttree, names, wildcard, regex, mrca=True, mono=True):
    """
    Used in multiple internal functions (e.g., .root()) and .drop_tips())
    to select an internal mrca node, or multiple tipnames, using fuzzy
    matching so that every name does not need to be written out by hand.

    Parameters
    ----------
    ttree: the tree to search; must expose get_tip_labels() and treenode.
    names: verbose list (or single str/int) of exact tip names.
    wildcard: matching unique substring.
    regex: regex expression matched against tip names.
    mrca: return mrca node of selected tipnames (else the name list).
    mono: raise error if selected tipnames are not monophyletic.

    Raises
    ------
    ToytreeError when no selector is given, a name is missing, nothing
    matches, or (with mono=True) the selection is paraphyletic.
    """
    # require arguments: at least one selector must be provided
    if not any([names, wildcard, regex]):
        raise ToytreeError(
            "must enter an outgroup, wildcard selector, or regex pattern")

    # get list of **nodes** from {list, wildcard, or regex}
    tips = []
    if names:
        if isinstance(names, (str, int)):
            names = [names]
        notfound = [i for i in names if i not in ttree.get_tip_labels()]
        if any(notfound):
            raise ToytreeError(
                "Sample {} is not in the tree".format(notfound))
        tips = [i for i in ttree.treenode.get_leaves() if i.name in names]

    # use regex to match tipnames
    elif regex:
        tips = [
            i for i in ttree.treenode.get_leaves() if re.match(regex, i.name)
        ]
        if not any(tips):
            raise ToytreeError("No Samples matched the regular expression")

    # use wildcard substring matching
    elif wildcard:
        tips = [i for i in ttree.treenode.get_leaves() if wildcard in i.name]
        if not any(tips):
            raise ToytreeError("No Samples matched the wildcard")

    # build list of **tipnames** from matched nodes
    if not tips:
        raise ToytreeError("no matching tipnames")
    tipnames = [i.name for i in tips]

    # if a single tipname matched no need to check for monophyly
    if len(tips) == 1:
        if mrca:
            return tips[0]
        else:
            return tipnames

    # if multiple nodes matched, check if they're monophyletic
    mbool, mtype, mnames = (
        ttree.treenode.check_monophyly(
            tipnames, "name", ignore_missing=True)
    )

    # get mrca node
    node = ttree.treenode.get_common_ancestor(tips)

    # raise an error if required to be monophyletic but not
    if mono:
        if not mbool:
            raise ToytreeError(
                "Taxon list cannot be paraphyletic")

    # return tips or nodes
    if not mrca:
        return tipnames
    else:
        return node
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/utils.py#L424-L497
fuzzy match ranking
python
def checkMatch(input, prediction, sparse=True, verbosity=0):
    """
    Compares the actual input with the predicted input and returns results

    Parameters:
    -----------------------------------------------
    input:          The actual input
    prediction:     the predicted input
    verbosity:      If > 0, print debugging messages
    sparse:         If true, they are in sparse form (list of active indices)

    retval (foundInInput, totalActiveInInput, missingFromInput,
            totalActiveInPrediction)

    foundInInput:       The number of predicted active elements that were
                        found in the actual input
    totalActiveInInput: The total number of active elements in the input.
    missingFromInput:   The number of predicted active elements that were not
                        found in the actual input
    totalActiveInPrediction: The total number of active elements in the
                        prediction
    """
    if sparse:
        activeElementsInInput = set(input)
        activeElementsInPrediction = set(prediction)
    else:
        # Dense form: assumes inputs expose a numpy-style nonzero() —
        # TODO confirm with callers.
        activeElementsInInput = set(input.nonzero()[0])
        activeElementsInPrediction = set(prediction.nonzero()[0])

    totalActiveInPrediction = len(activeElementsInPrediction)
    totalActiveInInput = len(activeElementsInInput)

    foundInInput = len(activeElementsInPrediction.intersection(activeElementsInInput))
    missingFromInput = len(activeElementsInPrediction.difference(activeElementsInInput))
    missingFromPrediction = len(activeElementsInInput.difference(activeElementsInPrediction))

    if verbosity >= 1:
        # BUGFIX: converted legacy Python 2 print statements (a syntax
        # error under Python 3) into one print() call with equivalent output.
        print("preds. found in input:", foundInInput, "out of", totalActiveInPrediction,
              "; preds. missing from input:", missingFromInput, "out of", totalActiveInPrediction,
              "; unexpected active in input:", missingFromPrediction, "out of", totalActiveInInput)

    return (foundInInput, totalActiveInInput, missingFromInput, totalActiveInPrediction)
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/fdrutilities.py#L1261-L1307
fuzzy match ranking
python
def match(self, category, pattern):
    """Match the category against the wildcard pattern."""
    # NOTE(review): this relies on the wcmatch ``fnmatch`` module, which
    # accepts a ``flags`` argument — the stdlib fnmatch does not.
    return fnmatch.fnmatch(category, pattern, flags=self.FNMATCH_FLAGS)
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/flow_control/wildcard.py#L33-L36
fuzzy match ranking
python
def findBestMatch(self, needle, similarity):
    """ Find the best match for ``needle`` that has a similarity better than or equal to
    ``similarity``.

    Returns a tuple of ``(position, confidence)`` if a match is found, or ``None`` otherwise.

    *Developer's Note - Despite the name, this method actually returns the **first** result
    with enough similarity, not the **best** result.*
    """
    method = cv2.TM_CCOEFF_NORMED
    heat = cv2.matchTemplate(self.haystack, needle, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(heat)

    # Squared-difference metrics score the best match LOW; every other
    # metric scores it HIGH.
    position = None
    if method == cv2.TM_SQDIFF_NORMED or method == cv2.TM_SQDIFF:
        confidence = min_val
        if min_val <= 1 - similarity:
            # Confidence checks out
            position = min_loc
    else:
        confidence = max_val
        if max_val >= similarity:
            # Confidence checks out
            position = max_loc

    return (position, confidence) if position else None
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/TemplateMatchers.py#L16-L43
fuzzy match ranking
python
def create_rank_dicts(self, alt_scores):
    """
    Description:
        Takes in the scores of the alternatives in the form alt:score and
        generates the dictionaries mapping alternatives to rankings and
        rankings to alternatives. Equal scores share a rank; each drop in
        score starts the next rank.
    Parameters:
        alt_scores: dictionary of the scores of every alternative
    """
    self.alts_to_ranks = dict()
    current_score = max(alt_scores.values())
    current_rank = 0
    self.ranks_to_alts = {current_rank: []}

    # Walk alternatives from highest to lowest score.
    for alt in sorted(alt_scores.keys(), key=lambda a: -alt_scores[a]):
        if alt_scores[alt] < current_score:
            # Score dropped: open the next rank bucket.
            current_rank += 1
            current_score = alt_scores[alt]
            self.ranks_to_alts[current_rank] = []
        self.ranks_to_alts[current_rank].append(alt)
        self.alts_to_ranks[alt] = current_rank
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/aggregate.py#L71-L91
fuzzy match ranking
python
def all_matches(self, target, choices, group=False, include_rank=False):
    """\
    Get all choices listed from best match to worst match.

    If `group` is `True`, matches are grouped by their distance from
    `get_distance(target, choice)` and yielded as one iterator per
    distinct distance; otherwise matches are yielded one at a time.

    For example ::

        from howabout import all_matches

        choices = ['pot', 'cat', 'bat']

        for group in all_matches('hat', choices, group=True):
            print(list(group))

        # ['bat', 'cat']
        # ['pot']

    :param target: a string
    :param choices: a list or iterable of strings to compare with
                    `target` string
    :param group: if `True`, group matches by distance
    """
    # NOTE(review): include_rank is currently unused; kept for
    # interface compatibility.
    dist = self.get_distance

    # Score lazily, then sort by (distance, choice).
    scored = sorted((dist(target, choice), choice) for choice in choices)

    if group:
        for _, bucket in groupby(scored, key=lambda pair: pair[0]):
            yield map(lambda pair: pair[1], bucket)
    else:
        for _, choice in scored:
            yield choice
https://github.com/chrlie/howabout/blob/780cacbdd9156106cc77f643c75191a824b034bb/src/howabout/__init__.py#L68-L109
fuzzy match ranking
python
def search_people_by_bio(query, limit_results=DEFAULT_LIMIT,
                         index=['onename_people_index']):
    """ queries lucene index to find a nearest match, output is profile username
    """
    # NOTE(review): mutable default argument for ``index`` kept for
    # interface compatibility.
    from pyes import QueryStringQuery, ES

    conn = ES()

    # Strict AND query first: fewer results but higher quality.
    q = QueryStringQuery(query,
                         search_fields=['username', 'profile_bio'],
                         default_operator='and')
    results = conn.search(query=q, size=20, indices=index)
    count = conn.count(query=q)
    count = count.count

    # Fall back to OR only when AND found nothing — more results but
    # results quality goes down.
    if count == 0:
        q = QueryStringQuery(query,
                             search_fields=['username', 'profile_bio'],
                             default_operator='or')
        results = conn.search(query=q, size=20, indices=index)

    # Collect at most limit_results usernames.
    usernames = []
    for profile in results:
        usernames.append(profile['username'])
        if len(usernames) == limit_results:
            break
    return usernames
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/api/search/substring_search.py#L172-L210
fuzzy match ranking
python
def _is_better_match(x, y, matched_a, matched_b, attributes_dict_a, attributes_dict_b):
    """
    :param x: The first element of a possible match.
    :param y: The second element of a possible match.
    :param matched_a: The current matches for the first set.
    :param matched_b: The current matches for the second set.
    :param attributes_dict_a: The attributes for each element in the first set.
    :param attributes_dict_b: The attributes for each element in the second set.
    :returns: True/False
    """
    attrs_x = attributes_dict_a[x]
    attrs_y = attributes_dict_b[y]

    # If x already has a match, the candidate must be strictly closer.
    if x in matched_a:
        attrs_current = attributes_dict_b[matched_a[x]]
        if _euclidean_dist(attrs_x, attrs_y) >= _euclidean_dist(attrs_x, attrs_current):
            return False

    # Likewise for y's existing match.
    if y in matched_b:
        attrs_current = attributes_dict_a[matched_b[y]]
        if _euclidean_dist(attrs_x, attrs_y) >= _euclidean_dist(attrs_y, attrs_current):
            return False

    return True
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/bindiff.py#L131-L151
fuzzy match ranking
python
def wildcards_overlap(name1, name2):
    """Return true if two wildcard patterns can match the same string."""
    # Both patterns exhausted together: they both match the empty string.
    if not name1 and not name2:
        return True
    # Only one exhausted: the remaining pattern still has content.
    if not name1 or not name2:
        return False
    # Try every way the leading characters can consume each other and
    # recurse on the remainders; any() short-circuits like the original
    # early return.
    return any(
        wildcards_overlap(name1[m1:], name2[m2:])
        for m1, m2 in _character_matches(name1, name2)
    )
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L344-L353
fuzzy match ranking
python
def closest_match(match, specs, depth=0):
    """
    Recursively iterates over type, group, label and overlay key,
    finding the closest matching spec.
    """
    exact = []
    partial = []
    for i, spec in specs:
        if spec[0] == match[0]:
            # Exact head match: keep for recursion on the tail.
            exact.append((i, spec[1:]))
        else:
            # Score the partial match: longest shared string prefix,
            # negative numeric distance, or zero for mixed types.
            if all(isinstance(s[0], basestring) for s in [spec, match]):
                match_length = max(i for i in range(len(match[0]))
                                   if match[0].startswith(spec[0][:i]))
            elif is_number(match[0]) and is_number(spec[0]):
                match_length = -abs(match[0] - spec[0])
            else:
                match_length = 0
            partial.append((i, match_length, spec[0]))

    if len(exact) == 1:
        return exact[0][0]
    elif exact:
        return closest_match(match[1:], exact, depth + 1)

    # No exact head matches: at the top level (or with nothing scored)
    # there is no match; otherwise pick the best partial score.
    if depth == 0 or not partial:
        return None
    return sorted(partial, key=lambda x: -x[1])[0][0]
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/util.py#L2034-L2063
fuzzy match ranking
python
def count(self, index=None, doc_type=None, body=None, **query_params):
    """
    Execute a query and get the number of matches for that query.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html>`_

    :param index: A comma-separated list of indices to restrict the results
    :param doc_type: A comma-separated list of types to restrict the results
    :param body: A query to restrict the results specified with the Query DSL (optional)
    :arg allow_no_indices: Whether to ignore if a wildcard indices expression
        resolves into no concrete indices.
    :arg analyze_wildcard: Specify whether wildcard and prefix queries should
        be analyzed (default: false)
    :arg analyzer: The analyzer to use for the query string
    :arg default_operator: The default operator for query string query (AND
        or OR), default 'OR'
    :arg df: The field to use as default where no field prefix is given in
        the query string
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open'
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg lenient: Specify whether format-based query failures should be ignored
    :arg lowercase_expanded_terms: Specify whether query terms should be lowercased
    :arg min_score: Include only documents with a specific `_score` value in
        the result
    :arg preference: Specify the node or shard the operation should be
        performed on (default: random)
    :arg q: Query in the Lucene query string syntax
    :arg routing: Specific routing value
    """
    # A doc_type without an index implies counting across all indices.
    if doc_type and not index:
        index = EsConst.ALL_VALUES

    path = self._es_parser.make_path(index, doc_type, EsMethods.COUNT)
    result = yield self._perform_request(HttpMethod.GET, path, body, params=query_params)
    returnValue(result)
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/client.py#L497-L536
fuzzy match ranking
python
def best_score(self, seqs, scan_rc=True, normalize=False):
    """
    give the score of the best match of each motif in each sequence
    returns an iterator of lists containing floats
    """
    self.set_threshold(threshold=0.0)

    # Lazily compute per-motif mean/std only when normalization needs them.
    if normalize and len(self.meanstd) == 0:
        self.set_meanstd()

    means = np.array([self.meanstd[m][0] for m in self.motif_ids])
    stds = np.array([self.meanstd[m][1] for m in self.motif_ids])

    for matches in self.scan(seqs, 1, scan_rc):
        # Each motif's match list holds (score, ...) tuples; take the
        # first score after sorting.
        scores = np.array([
            sorted(m, key=lambda x: x[0])[0][0]
            for m in matches if len(m) > 0
        ])
        if normalize:
            scores = (scores - means) / stds
        yield scores
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/scanner.py#L533-L548
fuzzy match ranking
python
def matchlist_by_account(
    self,
    region,
    encrypted_account_id,
    queue=None,
    begin_time=None,
    end_time=None,
    begin_index=None,
    end_index=None,
    season=None,
    champion=None,
):
    """
    Get matchlist for ranked games played on given account ID and platform
    ID and filtered using given filter parameters, if any.

    It is up to the caller to ensure the combination of filter parameters
    is valid for the requested account, otherwise no matches may be
    returned. If either beginIndex or endIndex is specified, both must be,
    and endIndex must be greater than beginIndex. If endTime is specified
    without beginTime, beginTime is effectively the start of the account's
    match history; if beginTime is specified without endTime, endTime is
    effectively the current time. endTime should be greater than beginTime
    when both are given, although there is no maximum limit on their range.

    :param string region: The region to execute this request on
    :param string encrypted_account_id: The account ID.
    :param Set[int] queue: Set of queue IDs for which to filtering matchlist.
    :param long begin_time: Begin time, epoch milliseconds.
    :param long end_time: End time, epoch milliseconds.
    :param int begin_index: The begin index to use for filtering matchlist.
    :param int end_index: The end index to use for filtering matchlist.
    :param Set[int] season: Set of season IDs for which to filtering matchlist.
    :param Set[int] champion: Set of champion IDs for which to filtering matchlist.

    :returns: MatchlistDto
    """
    # Build the endpoint URL/query, then issue the raw API request under
    # this method's name (used for rate-limit bookkeeping).
    url, query = MatchApiV4Urls.matchlist_by_account(
        region=region,
        encrypted_account_id=encrypted_account_id,
        queue=queue,
        beginTime=begin_time,
        endTime=end_time,
        beginIndex=begin_index,
        endIndex=end_index,
        season=season,
        champion=champion,
    )
    return self._raw_request(self.matchlist_by_account.__name__, region, url, query)
https://github.com/pseudonym117/Riot-Watcher/blob/21ab12453a0d824d67e30f5514d02a5c5a411dea/src/riotwatcher/_apis/MatchApiV4.py#L32-L88
fuzzy match ranking
python
def reportMatchCompletion(cfg, results, replayData):
    """send information back to the server about the match's winners/losers"""
    # Bundle the flattened config, the results and the replay into one
    # JSON payload.
    payload = json.dumps([cfg.flatten(), results, replayData])
    ladder = cfg.ladder
    return requests.post(
        url=c.URL_BASE % (ladder.ipAddress, ladder.serverPort, "matchfinished"),
        data=payload,
    )
https://github.com/ttinies/sc2gameLobby/blob/5352d51d53ddeb4858e92e682da89c4434123e52/sc2gameLobby/connectToServer.py#L57-L65
fuzzy match ranking
python
def _get_rank(self, team):
    """
    Find the team's rank when applicable.

    If a team is ranked, it will show up in a separate <span> tag with the
    actual rank embedded between parentheses, e.g. "(12)". When a team is
    ranked, the integer value representing their ranking is returned; for
    teams that are not ranked, None is returned.

    Parameters
    ----------
    team : PyQuery object
        A PyQuery object of a team's HTML tag in the boxscore.

    Returns
    -------
    int
        The team's ranking when applicable, or None if not ranked.
    """
    rank_field = team('span[class="pollrank"]')
    if len(rank_field) == 0:
        return None
    # Pull the "(N)" token out of the span and strip the parentheses.
    token = re.findall(r'\(\d+\)', str(rank_field))[0]
    return int(token.replace('(', '').replace(')', ''))
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaab/boxscore.py#L1717-L1742
fuzzy match ranking
python
def matches(self, string, fuzzy=90, fname_match=True, fuzzy_fragment=None, guess=False):
    '''Returns whether this :class:`Concept` matches ``string``'''
    found = []
    for item in self.examples:
        best = best_match_from_list(string, self.examples[item], fuzzy,
                                    fname_match, fuzzy_fragment, guess)
        if best:
            # Wrap each hit in a ConceptMatch carrying the match details.
            m = ConceptMatch(self)
            m.concept = self
            m.string = string
            m.item = item
            m.examples = best[0]
            m.type = best[2]
            m.amount = best[3]
            found.append(m)
    return sort_matches(found)
https://github.com/azraq27/gini/blob/3c2b5265d096d606b303bfe25ac9adb74b8cee14/gini/semantics.py#L72-L87
fuzzy match ranking
python
def do_counting(sorting1, sorting2, delta_tp, unit_map12):
    """
    This count all counting score possible lieke:
      * TP: true positive
      * CL: classification error
      * FN: False negative
      * FP: False positive
      * TOT:
      * TOT_ST1:
      * TOT_ST2:

    Parameters
    ----------
    sorting1: SortingExtractor instance
        The ground truth sorting.
    sorting2: SortingExtractor instance
        The tested sorting.
    delta_tp: int
        Tolerance window (in samples) for pairing spikes.
    unit_map12: dict
        Dict of matching from sorting1 to sorting2 (-1 means unmatched).

    Output
    ----------
    counts: dict
        A dict containing all coutning
    labels_st1: np.array of str
        Contain label for units of sorting 1
    labels_st2: np.array of str
        Contain label for units of sorting 2
    """
    unit1_ids = sorting1.get_unit_ids()
    unit2_ids = sorting2.get_unit_ids()
    labels_st1 = dict()
    labels_st2 = dict()
    N1 = len(unit1_ids)
    N2 = len(unit2_ids)

    # copy spike trains for faster access from extractors with memmapped data
    #~ sts1 = []
    #~ for u in sorting1.get_unit_ids():
    #~     sts1.append(sorting1.get_unit_spike_train(u))
    #~ sts2 = []
    #~ for u in sorting2.get_unit_ids():
    #~     sts2.append(sorting2.get_unit_spike_train(u))
    sts1 = {u1: sorting1.get_unit_spike_train(u1) for u1 in unit1_ids}
    sts2 = {u2: sorting2.get_unit_spike_train(u2) for u2 in unit2_ids}

    # Evaluate: every spike starts UNPAIRED and is later promoted to
    # TP / CL_* / FN / FP.
    for u1 in unit1_ids:
        lab_st1 = np.array(['UNPAIRED'] * len(sts1[u1]))
        labels_st1[u1] = lab_st1
    for u2 in unit2_ids:
        lab_st2 = np.array(['UNPAIRED'] * len(sts2[u2]))
        labels_st2[u2] = lab_st2

    for u1 in unit1_ids:
        u2 = unit_map12[u1]
        if u2 !=-1:
            lab_st1 = labels_st1[u1]
            lab_st2 = labels_st2[u2]
            mapped_st = sorting2.get_unit_spike_train(u2)
            # from gtst: TP, TPO, TPSO, FN, FNO, FNSO
            for sp_i, n_sp in enumerate(sts1[u1]):
                # a spike is a TP when the matched unit has a spike within
                # +/- delta_tp//2 samples
                matches = (np.abs(mapped_st.astype(int)-n_sp)<=delta_tp//2)
                if np.sum(matches) > 0:
                    lab_st1[sp_i] = 'TP'
                    lab_st2[np.where(matches)[0][0]] = 'TP'
        else:
            # whole unmatched GT unit becomes false negatives
            lab_st1 = np.array(['FN'] * len(sts1[u1]))
            labels_st1[u1] = lab_st1

    # find CL-CLO-CLSO: an unpaired GT spike that lands near a spike of a
    # *different* matched unit is a classification error.
    # NOTE(review): structure reconstructed from collapsed text — confirm
    # the guard condition against the upstream repository.
    for u1 in unit1_ids:
        lab_st1 = labels_st1[u1]
        st1 = sts1[u1]
        for l_gt, lab in enumerate(lab_st1):
            if lab == 'UNPAIRED':
                for u2 in unit2_ids:
                    if u2 in unit_map12.values() and unit_map12[u1] != -1:
                        lab_st2 = labels_st2[u2]
                        n_sp = st1[l_gt]
                        mapped_st = sts2[u2]
                        matches = (np.abs(mapped_st.astype(int)-n_sp)<=delta_tp//2)
                        if np.sum(matches) > 0:
                            lab_st1[l_gt] = 'CL_' + str(u1) + '_' + str(u2)
                            lab_st2[np.where(matches)[0][0]] = 'CL_' + str(u2) + '_' + str(u1)

    # whatever is still unpaired is FN on the GT side, FP on the tested side
    for u1 in unit1_ids:
        lab_st1 = labels_st1[u1]
        for l_gt, lab in enumerate(lab_st1):
            if lab == 'UNPAIRED':
                lab_st1[l_gt] = 'FN'
    for u2 in unit2_ids:
        lab_st2 = labels_st2[u2]
        for l_gt, lab in enumerate(lab_st2):
            if lab == 'UNPAIRED':
                lab_st2[l_gt] = 'FP'

    TOT_ST1 = sum([len(sts1[u1]) for u1 in unit1_ids])
    TOT_ST2 = sum([len(sts2[u2]) for u2 in unit2_ids])
    total_spikes = TOT_ST1 + TOT_ST2
    TP = sum([len(np.where('TP' == labels_st1[unit])[0]) for unit in unit1_ids])
    CL = sum([len([i for i, v in enumerate(labels_st1[u1]) if 'CL' in v]) for u1 in unit1_ids])
    FN = sum([len(np.where('FN' == labels_st1[u1])[0]) for u1 in unit1_ids])
    FP = sum([len(np.where('FP' == labels_st2[u2])[0]) for u2 in unit2_ids])
    counts = {'TP': TP, 'CL': CL, 'FN': FN, 'FP': FP,
              'TOT': total_spikes, 'TOT_ST1': TOT_ST1, 'TOT_ST2': TOT_ST2}
    return counts, labels_st1, labels_st2
https://github.com/SpikeInterface/spiketoolkit/blob/f7c054383d1ebca640966b057c087fa187955d13/spiketoolkit/comparison/comparisontools.py#L197-L315
fuzzy match ranking
python
def fuzzy_match(self, other):
    """
    Given another token, see if either the major alias identifier
    (the part before the first '.') matches the other token's alias,
    or if our full alias matches the other token's magic.
    """
    magic_hit = False
    if hasattr(other, 'magic'):
        magic_hit = (self.alias == other.magic)
    major_hit = False
    if '.' in self.alias:
        major_segment = self.alias.split('.')[0]
        major_hit = (major_segment == other.alias)
    return magic_hit or major_hit
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/tokens.py#L98-L111
fuzzy match ranking
python
def _match(self, check): """Find all the matches for a check dict.""" matches = [] tests = {} for k,v in check.iteritems(): if isinstance(v, dict): tests[k] = CompositeFilter(v) else: tests[k] = lambda o: o==v for rec in self._records.itervalues(): if self._match_one(rec, tests): matches.append(copy(rec)) return matches
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/dictdb.py#L111-L124
fuzzy match ranking
python
def match(fullname1, fullname2, strictness='default', options=None):
    """
    Takes two names and returns true if they describe the same person.

    :param string fullname1: first human name
    :param string fullname2: second human name
    :param string strictness: strictness settings to use
    :param dict options: custom strictness settings updates
    :return bool: the names match
    """
    if options is not None:
        # deep-copy so custom options never mutate the shared SETTINGS table
        settings = deepcopy(SETTINGS[strictness])
        deep_update_dict(settings, options)
    else:
        settings = SETTINGS[strictness]
    name1 = Name(fullname1)
    name2 = Name(fullname2)
    return name1.deep_compare(name2, settings)
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/who.py#L8-L28
fuzzy match ranking
python
def getRankMap(self): """ Returns a dictionary that associates the integer representation of each candidate with its position in the ranking, starting from 1. """ # We sort the candidates based on the number of incoming edges they have in the graph. If # two candidates have the same number, we assume that they are tied. incEdgesMap = self.getIncEdgesMap() sortedKeys = sorted(incEdgesMap.keys(), reverse = True) rankMap = dict() pos = 1 for key in sortedKeys: cands = incEdgesMap[key] for cand in cands: rankMap[cand] = pos pos += 1 return rankMap
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/preference.py#L76-L93
fuzzy match ranking
python
def get_order_matchresults(self, order_id, _async=False):
    """
    Query the trade (match) details of a single order.

    :param order_id: identifier of the order to look up
    :param _async: when True, the request is issued asynchronously
    :return: response of the match-results endpoint
    """
    params = {}
    path = f'/v1/order/orders/{order_id}/matchresults'
    return api_key_get(params, path, _async=_async)
https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L298-L306
fuzzy match ranking
python
def create_ranking(ranking_tuples):
    """
    Serialize (choice, ranking) pairs into the comma-separated string
    stored in the rankings field, ordered by each choice's primary key.

    Parameters:
        ranking_tuples should be an iterable of tuples of form
        (choice, ranking)
    """
    ordered = sorted(ranking_tuples, key=lambda pair: pair[0].pk)
    return ",".join(str(ranking) for _choice, ranking in ordered)
https://github.com/knagra/farnsworth/blob/1b6589f0d9fea154f0a1e2231ed906764ed26d26/elections/models.py#L336-L346
fuzzy match ranking
python
def rank(keys, axis=semantics.axis_default):
    """where each item is in the pecking order.

    Parameters
    ----------
    keys : indexable object

    Returns
    -------
    ndarray, [keys.size], int
        unique integers, ranking the sorting order

    Notes
    -----
    we should have that index.sorted[index.rank] == keys
    """
    # delegate to the Index object, which caches the sort permutation
    index = as_index(keys, axis)
    return index.rank
https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L199-L216
fuzzy match ranking
python
def _find_fuzzy_line(
        py_line_no, py_by_line_no, cheetah_by_line_no, prefer_first
):
    """Attempt to fuzzily find matching lines."""
    # Normalize the python line, then scan only the cheetah lines inside
    # the bounds implied by surrounding exact matches.
    stripped_line = _fuzz_py_line(py_by_line_no[py_line_no])
    cheetah_lower_bound, cheetah_upper_bound = _find_bounds(
        py_line_no, py_by_line_no, cheetah_by_line_no,
    )
    sliced = list(enumerate(cheetah_by_line_no))[
        cheetah_lower_bound:cheetah_upper_bound
    ]
    if not prefer_first:
        # scan back-to-front so the last candidate wins
        sliced = reversed(sliced)
    for line_no, line in sliced:
        if stripped_line in _fuzz_cheetah_line(line):
            return line_no
    else:
        # We've failed to find a matching line
        return 0
https://github.com/asottile/cheetah_lint/blob/1ecd54933e63073a7e77d65c8a2514a21e145c34/cheetah_lint/flake.py#L184-L204
fuzzy match ranking
python
def squad_v1_exact_match(y_true: List[List[str]], y_predicted: List[str]) -> float:
    """ Calculates Exact Match score between y_true and y_predicted
        EM score uses the best matching y_true answer: if y_pred equal at least to one answer
        in y_true then EM = 1, else EM = 0
        Skips examples without an answer.

    Args:
        y_true: list of correct answers (correct answers are represented by list of strings)
        y_predicted: list of predicted answers

    Returns:
        exact match score : float
    """
    EM_total = 0
    count = 0
    for ground_truth, prediction in zip(y_true, y_predicted):
        if len(ground_truth[0]) == 0:
            # skip empty answers
            continue
        count += 1
        # credit the prediction if it matches ANY gold answer after normalization
        EMs = [int(normalize_answer(gt) == normalize_answer(prediction)) for gt in ground_truth]
        EM_total += max(EMs)
    return 100 * EM_total / count if count > 0 else 0
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/squad_metrics.py#L44-L64
fuzzy match ranking
python
def _match_show(self, show):
    """Match a query for a specific show/list of shows.

    When no show filter is configured (self.show is falsy), every show
    matches by default.
    """
    if self.show:
        return match_list(self.show, show)
    else:
        return True
https://github.com/sharibarboza/py_zap/blob/ce90853efcad66d3e28b8f1ac910f275349d016c/py_zap/py_zap.py#L212-L217
fuzzy match ranking
python
def suggest_pairs(top_n=10, per_n=3, ignore_before=300):
    """
    Find the maximally interesting pairs of players to match up
    First, sort the ratings by uncertainty.
    Then, take the ten highest players with the highest uncertainty
    For each of them, call them `p1`
    Sort all the models by their distance from p1's rating and
    take the 20 nearest rated models. ('candidate_p2s')
    Choose pairings, (p1, p2), randomly from this list.

    `top_n` will pair the top n models by uncertainty.
    `per_n` will give each of the top_n models this many opponents
    `ignore_before` is the model number to `filter` off, i.e., the early
    models.

    Returns a list of *model numbers*, not model ids.
    """
    db = sqlite3.connect("ratings.db")
    data = db.execute("select model_winner, model_loser from wins").fetchall()
    bucket_ids = [id[0] for id in db.execute(
        "select id from models where bucket = ?", (fsdb.models_dir(),)).fetchall()]
    bucket_ids.sort()
    # keep only games where both models belong to the current bucket
    data = [d for d in data if d[0] in bucket_ids and d[1] in bucket_ids]
    # ratings entries are (model_num, rating, uncertainty)
    ratings = [(model_num_for(k), v[0], v[1]) for k, v in compute_ratings(data).items()]
    ratings.sort()
    ratings = ratings[ignore_before:]  # Filter off the first 100 models, which improve too fast.
    ratings.sort(key=lambda r: r[2], reverse=True)  # most uncertain first
    res = []
    for p1 in ratings[:top_n]:
        # nearest-rated neighbours, excluding p1 itself (slice starts at 1)
        candidate_p2s = sorted(ratings, key=lambda p2_tup: abs(p1[1] - p2_tup[1]))[1:20]
        choices = random.sample(candidate_p2s, per_n)
        print("Pairing {}, sigma {:.2f} (Rating {:.2f})".format(p1[0], p1[2], p1[1]))
        for p2 in choices:
            res.append([p1[0], p2[0]])
            print(" {}, ratings delta {:.2f}".format(p2[0], abs(p1[1] - p2[1])))
    return res
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/ratings/ratings.py#L233-L268
fuzzy match ranking
python
def fuzzy_like_this(cls, like_text, fields=None, ignore_tf=None,
                    max_query_terms=None, min_similarity=None,
                    prefix_length=None, boost=None, analyzer=None):
    '''
    http://www.elasticsearch.org/guide/reference/query-dsl/flt-query.html
    Fuzzy like this query find documents that are "like" provided text by
    running it against one or more fields.  Only the options explicitly
    supplied (non-None) are serialized into the query body.

    > query = ElasticQuery().fuzzy_like_this('text like this one', fields=['name.first', 'name.last'], max_query_terms=12)
    > query
      {'fuzze_like_this': {'boost': 1.0,
                       'fields': ['name.first', 'name.last'],
                       'ifgnore_tf': False,
                       'like_text': 'text like this one',
                       'max_query_terms': 12,
                       'min_similarity': 0.5,
                       'prefix_length': 0}}
    '''
    instance = cls(fuzzy_like_this={'like_text': like_text})
    # Optional parameters, in the same order the original if-chain used.
    optional_params = (
        ('fields', fields),
        ('ignore_tf', ignore_tf),
        ('max_query_terms', max_query_terms),
        ('min_similarity', min_similarity),
        ('prefix_length', prefix_length),
        ('boost', boost),
        ('analyzer', analyzer),
    )
    for key, value in optional_params:
        if value is not None:
            instance['fuzzy_like_this'][key] = value
    return instance
https://github.com/ooici/elasticpy/blob/ec221800a80c39e80d8c31667c5b138da39219f2/elasticpy/query.py#L171-L203
fuzzy match ranking
python
def match_name(self, in_string, fuzzy=False):
    """Match a color to a sRGB value.

    The matching will be based purely on the input string and the color names
    in the registry. If there's no direct hit, a fuzzy matching algorithm is
    applied. This method will never fail to return a sRGB value, but depending
    on the score, it might or might not be a sensible result – as a rule of
    thumb, any score less then 90 indicates that there's a lot of guessing
    going on. It's the callers responsibility to judge if the return value
    should be trusted.

    Args:
        in_string (string): The input string containing something resembling
            a color name.
        fuzzy (bool, optional): Try fuzzy matching if no exact match was
            found. Defaults to ``False``.

    Returns:
        A named tuple with the members `hex_code` and `score`.

    Raises:
        ValueError: If ``fuzzy`` is ``False`` and no match is found
    """
    in_string = _normalize(in_string)
    if in_string in self._hex_by_color:
        # exact hit: maximum confidence
        return MatchResult(self._hex_by_color[in_string], 100)
    if not fuzzy:
        raise ValueError("No match for %r found." % in_string)
    # We want the standard scorer *plus* the set scorer, because colors are often
    # (but not always) related by sub-strings
    color_names = self._hex_by_color.keys()
    set_match = dict(fuzzywuzzy.process.extract(
        in_string, color_names, scorer=fuzzywuzzy.fuzz.token_set_ratio
    ))
    standard_match = dict(fuzzywuzzy.process.extract(in_string, color_names))
    # This would be much easier with a collections.Counter, but alas! it's a 2.7 feature.
    key_union = set(set_match) | set(standard_match)
    counter = ((n, set_match.get(n, 0) + standard_match.get(n, 0)) for n in key_union)
    # highest combined score wins; halve it back into the 0-100 range
    color_name, score = sorted(counter, key=operator.itemgetter(1))[-1]
    return MatchResult(self._hex_by_color[color_name], score / 2)
https://github.com/solute/python-tint/blob/a09e44147b9fe81a67892901960f5b8350821c95/tint/registry.py#L156-L209
fuzzy match ranking
python
def matching_details(song_name, song_title, artist):
    '''
    Scores the relevance of a search result.  Compares the query against
    the candidate title alone and against artist+title; both scores are
    difflib similarity ratios in [0, 1], with 0.55 as the relevance cutoff.

    Returns (is_relevant, score).
    '''
    title_ratio = difflib.SequenceMatcher(None, song_name, song_title).ratio()
    combined_ratio = difflib.SequenceMatcher(None, song_name, artist + song_title).ratio()
    best = max(title_ratio, combined_ratio)
    if best >= 0.55:
        return True, best
    return False, (title_ratio + combined_ratio) / 2
https://github.com/kalbhor/MusicNow/blob/12ff1ed2ea2bb7dbbfd925d7998b3ea1e20de291/musicnow/repair.py#L60-L73
fuzzy match ranking
python
def __match_interval_overlaps(query, intervals_to, candidates):  # pragma: no cover
    '''Find the best Jaccard match from query to candidates'''
    # linear scan: first candidate with the highest Jaccard overlap wins
    best_score = -1
    best_idx = -1
    for idx in candidates:
        score = __jaccard(query, intervals_to[idx])
        if score > best_score:
            best_score, best_idx = score, idx
    # -1 when candidates is empty
    return best_idx
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/matching.py#L49-L59
fuzzy match ranking
python
def squad_v2_exact_match(y_true: List[List[str]], y_predicted: List[str]) -> float:
    """ Calculates Exact Match score between y_true and y_predicted
        EM score uses the best matching y_true answer: if y_pred equal at least to one answer
        in y_true then EM = 1, else EM = 0
        The same as in SQuAD-v2.0

    Args:
        y_true: list of correct answers (correct answers are represented by list of strings)
        y_predicted: list of predicted answers

    Returns:
        exact match score : float
    """
    # a prediction counts when its normalized form appears among the
    # normalized gold answers; unlike v1, empty answers are NOT skipped
    EM_total = sum(normalize_answer(prediction) in map(normalize_answer, ground_truth)
                   for ground_truth, prediction in zip(y_true, y_predicted))
    return 100 * EM_total / len(y_true) if len(y_true) > 0 else 0
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/metrics/squad_metrics.py#L24-L40
fuzzy match ranking
python
def matches_query(self, key, query):
    """
    Add a constraint requiring the value of the given field to match one
    of the objects returned by another (sub)query.

    :param key: field name to constrain
    :param query: the subquery whose results the field value must be in
    :type query: Query
    :rtype: Query
    """
    dumped = query.dump()
    # the serialized subquery must carry its target class name
    dumped['className'] = query._query_class._class_name
    self._add_condition(key, '$inQuery', dumped)
    # return self so conditions can be chained
    return self
https://github.com/leancloud/python-sdk/blob/fea3240257ce65e6a32c7312a5cee1f94a51a587/leancloud/query.py#L446-L458
fuzzy match ranking
python
def get_order_matchresults(self, order_id):
    """
    Query the trade (match) details of a single order
    (decorator form: the wrapped callback receives the API response).

    :param order_id: identifier of the order to look up
    :return: a decorator that wraps a callback function
    """
    params = {}
    path = f'/v1/order/orders/{order_id}/matchresults'
    def _wrapper(_func):
        @wraps(_func)
        def handle():
            # fetch the match results and hand them to the callback
            _func(api_key_get(params, path))
        return handle
    return _wrapper
https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L1022-L1038
fuzzy match ranking
python
def get_suggestions(self, prefix, fuzzy = False, num = 10, with_scores = False, with_payloads=False):
    """
    Get a list of suggestions from the AutoCompleter, for a given prefix

    ### Parameters:
    - **prefix**: the prefix we are searching. **Must be valid ascii or utf-8**
    - **fuzzy**: If set to true, the prefix search is done in fuzzy mode.
        **NOTE**: Running fuzzy searches on short (<3 letters) prefixes can be very slow, and even scan the entire index.
    - **with_scores**: if set to true, we also return the (refactored) score of each suggestion.
      This is normally not needed, and is NOT the original score inserted into the index
    - **with_payloads**: Return suggestion payloads
    - **num**: The maximum number of results we return. Note that we might return less. The algorithm trims irrelevant suggestions.

    Returns a list of Suggestion objects. If with_scores was False, the score of all suggestions is 1.
    """
    # build the SUGGET command incrementally from the requested flags
    args = [AutoCompleter.SUGGET_COMMAND, self.key, prefix, 'MAX', num]
    if fuzzy:
        args.append(AutoCompleter.FUZZY)
    if with_scores:
        args.append(AutoCompleter.WITHSCORES)
    if with_payloads:
        args.append(AutoCompleter.WITHPAYLOADS)
    ret = self.redis.execute_command(*args)
    results = []
    if not ret:
        return results
    # the parser knows how the flat reply interleaves strings/scores/payloads
    parser = SuggestionParser(with_scores, with_payloads, ret)
    return [s for s in parser]
https://github.com/RediSearch/redisearch-py/blob/f65d1dd078713cbe9b83584e86655a254d0531ab/redisearch/auto_complete.py#L115-L145
fuzzy match ranking
python
def get_qualification_type_by_name(self, name):
    """Return a Qualification Type by name.

    If the provided name matches more than one Qualification,
    check to see if any of the results match the provided name
    exactly. If there's an exact match, return that Qualification.
    Otherwise, raise an exception.
    """
    max_fuzzy_matches_to_check = 100
    query = name.upper()
    start = time.time()
    args = {
        "Query": query,
        "MustBeRequestable": False,
        "MustBeOwnedByCaller": True,
        "MaxResults": max_fuzzy_matches_to_check,
    }
    results = self.mturk.list_qualification_types(**args)["QualificationTypes"]
    # This loop is largely for tests, because there's some indexing that
    # needs to happen on MTurk for search to work:
    while not results and time.time() - start < self.max_wait_secs:
        time.sleep(1)
        results = self.mturk.list_qualification_types(**args)["QualificationTypes"]
    if not results:
        return None
    qualifications = [self._translate_qtype(r) for r in results]
    if len(qualifications) > 1:
        # prefer an exact (case-insensitive) name match among fuzzy hits
        for qualification in qualifications:
            if qualification["name"].upper() == query:
                return qualification
        raise MTurkServiceException("{} was not a unique name".format(query))
    return qualifications[0]
https://github.com/Dallinger/Dallinger/blob/76ca8217c709989c116d0ebd8fca37bd22f591af/dallinger/mturk.py#L207-L240
fuzzy match ranking
python
def film_search(self, title):
    """film search using fuzzy matching"""
    films = []
    # check for cache or update
    if not hasattr(self, 'film_list'):
        self.get_film_list()
    # iterate over films and check for fuzzy string match
    for film in self.film_list:
        strength = WRatio(title, film['title'])
        # 80 is the minimum WRatio score to count as a hit
        if strength > 80:
            film.update({u'strength':strength})
            films.append(film)
    # sort films by the strength of the fuzzy string match
    films_sorted = sorted(films, key=itemgetter('strength'), reverse = True)
    return films_sorted
https://github.com/oracal/cineworld/blob/073b18ce4f3acf4c44b26a5af1cc0d3c71b8b5d5/cineworld/cineworld.py#L81-L95
fuzzy match ranking
python
def matches(self, string, context=None):
    """
    Search for all matches with current configuration against input_string
    :param string: string to search into
    :type string: str
    :param context: context to use
    :type context: dict
    :return: A custom list of matches
    :rtype: Matches
    """
    matches = Matches(input_string=string)
    if context is None:
        context = {}
    # run pattern matching first, then the post-processing rules
    self._matches_patterns(matches, context)
    self._execute_rules(matches, context)
    return matches
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/rebulk.py#L272-L290
fuzzy match ranking
python
def get_match(session, match_id):
    """Get match metadata by scraping the Voobly match page."""
    url = '{}{}/{}'.format(session.auth.base_url, MATCH_URL, match_id)
    parsed = make_scrape_request(session, url)
    game = parsed.find('h3').text
    if game != GAME_AOC:
        raise ValueError('not an aoc match')
    date_played = parsed.find(text=MATCH_DATE_PLAYED).find_next('td').text
    players = []
    colors = {}
    player_count = int(parsed.find('td', text='Players:').find_next('td').text)
    # each player's color is encoded in an inline background-color style
    for div in parsed.find_all('div', style=True):
        if not div['style'].startswith('background-color:'):
            continue
        if len(players) == player_count:
            break
        username_elem = div.find_next('a', href=re.compile(PROFILE_PATH))
        username = username_elem.text
        color = div['style'].split(':')[1].split(';')[0].strip()
        colors[username] = color
        rec = None
        # find the recorded-game download link attributed to this player
        for dl_elem in parsed.find_all('a', href=re.compile('^/files/view')):
            rec_name = dl_elem.find('b', text=re.compile(username+'$'))
            if rec_name:
                rec = rec_name.parent
        user = parsed.find('a', text=username)
        if not user:
            # bugged match page
            continue
        user_id = int(user['href'].split('/')[-1])
        children = list(user.find_next('span').children)
        rate_after = None
        rate_before = None
        # rating delta appears in one of two span layouts
        if str(children[0]).strip() == MATCH_NEW_RATE:
            rate_after = int(children[1].text)
            rate_before = rate_after - int(children[3].text)
        elif str(children[4]).strip() == MATCH_NEW_RATE:
            rate_after = int(children[5].text)
            rate_before = rate_after - int(children[3].text)
        players.append({
            'url': rec['href'] if rec else None,
            'id': user_id,
            'username': username,
            'color_id': COLOR_MAPPING.get(colors[username]),
            'rate_before': rate_before,
            'rate_after': rate_after
        })
    return {
        'timestamp': dateparser.parse(date_played),
        'players': players
    }
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/voobly/__init__.py#L443-L495
fuzzy match ranking
python
def _match_exists(self, searchable):
    """Make sure the searchable description doesn't already exist.

    Returns the matching position key when found, otherwise False.
    NOTE(review): a falsy position key (e.g. 0) is indistinguishable from
    "not found" for callers that truth-test the result — confirm callers
    compare with `is False`.
    """
    position_searchable = self.get_position_searchable()
    for pos,val in position_searchable.iteritems():  # Python 2 dict API
        if val == searchable:
            return pos
    return False
https://github.com/binbrain/OpenSesame/blob/e32c306385012646400ecb49fc65c64b14ce3a93/OpenSesame/keyring.py#L79-L86
fuzzy match ranking
python
def get_best_match(instance1, attribute1, relation1, instance2, attribute2, relation2, prefix1, prefix2, doinstance=True, doattribute=True, dorelation=True): """ Get the highest triple match number between two sets of triples via hill-climbing. Arguments: instance1: instance triples of AMR 1 ("instance", node name, node value) attribute1: attribute triples of AMR 1 (attribute name, node name, attribute value) relation1: relation triples of AMR 1 (relation name, node 1 name, node 2 name) instance2: instance triples of AMR 2 ("instance", node name, node value) attribute2: attribute triples of AMR 2 (attribute name, node name, attribute value) relation2: relation triples of AMR 2 (relation name, node 1 name, node 2 name) prefix1: prefix label for AMR 1 prefix2: prefix label for AMR 2 Returns: best_match: the node mapping that results in the highest triple matching number best_match_num: the highest triple matching number """ # Compute candidate pool - all possible node match candidates. # In the hill-climbing, we only consider candidate in this pool to save computing time. 
# weight_dict is a dictionary that maps a pair of node (candidate_mappings, weight_dict) = compute_pool(instance1, attribute1, relation1, instance2, attribute2, relation2, prefix1, prefix2, doinstance=doinstance, doattribute=doattribute, dorelation=dorelation) if veryVerbose: print("Candidate mappings:", file=DEBUG_LOG) print(candidate_mappings, file=DEBUG_LOG) print("Weight dictionary", file=DEBUG_LOG) print(weight_dict, file=DEBUG_LOG) best_match_num = 0 # initialize best match mapping # the ith entry is the node index in AMR 2 which maps to the ith node in AMR 1 best_mapping = [-1] * len(instance1) for i in range(iteration_num): if veryVerbose: print("Iteration", i, file=DEBUG_LOG) if i == 0: # smart initialization used for the first round cur_mapping = smart_init_mapping(candidate_mappings, instance1, instance2) else: # random initialization for the other round cur_mapping = random_init_mapping(candidate_mappings) # compute current triple match number match_num = compute_match(cur_mapping, weight_dict) if veryVerbose: print("Node mapping at start", cur_mapping, file=DEBUG_LOG) print("Triple match number at start:", match_num, file=DEBUG_LOG) while True: # get best gain (gain, new_mapping) = get_best_gain(cur_mapping, candidate_mappings, weight_dict, len(instance2), match_num) if veryVerbose: print("Gain after the hill-climbing", gain, file=DEBUG_LOG) # hill-climbing until there will be no gain for new node mapping if gain <= 0: break # otherwise update match_num and mapping match_num += gain cur_mapping = new_mapping[:] if veryVerbose: print("Update triple match number to:", match_num, file=DEBUG_LOG) print("Current mapping:", cur_mapping, file=DEBUG_LOG) if match_num > best_match_num: best_mapping = cur_mapping[:] best_match_num = match_num return best_mapping, best_match_num
https://github.com/snowblink14/smatch/blob/ad7e6553a3d52e469b2eef69d7716c87a67eedac/smatch.py#L105-L173
fuzzy match ranking
python
def conflicting(self, match, predicate=None, index=None):
    """
    Retrieves a list of ``Match`` objects that conflicts with given match.
    :param match:
    :type match:
    :param predicate:
    :type predicate:
    :param index:
    :type index:
    :return:
    :rtype:
    """
    ret = _BaseMatches._base()
    # collect every match overlapping any position covered by `match`
    for i in range(*match.span):
        for at_match in self.at_index(i):
            if at_match not in ret:
                ret.append(at_match)
    # a match never conflicts with itself
    ret.remove(match)
    return filter_index(ret, predicate, index)
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/match.py#L435-L456
fuzzy match ranking
python
def find_match(self):
    """Load the config and find a matching rule.

    returns the results of find_match_command, a dict
    of the command and (in the future) other metadata.
    """
    self.load()
    for yamldoc in self.yamldocs:
        self.logdebug('\nchecking rule """%s"""\n' % yamldoc)
        if not yamldoc:
            continue
        if not self.check_client_ip(yamldoc):
            # Rejected - Client IP does not match
            continue
        if not self.check_keyname(yamldoc):
            # Rejected - keyname does not match
            continue
        rules = yamldoc.get('allow')
        # a single rule may be given as a bare mapping instead of a list
        if not isinstance(rules, list):
            rules = [rules]
        for rule in rules:
            rule_type = rule.get('rule_type', 'command')
            # dispatch on the rule type; unknown types are fatal
            if rule_type == 'command':
                sub = self.find_match_command
            elif rule_type == 'scp':
                sub = self.find_match_scp
            else:
                self.log('fatal: no such rule_type "%s"\n' % rule_type)
                self.raise_and_log_error(ConfigError, 'error parsing config.')
            match = sub(rule)
            if match:
                return match
    # No matches, time to give up.
    raise CommandRejected('command "%s" denied.' % self.original_command_string)
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/authprogs/authprogs.py#L370-L413
fuzzy match ranking
python
def get_ranking(self, alt):
    """
    Description:
        Return the position of a given alternative in the computed
        aggregate ranking (0-indexed).  Raises ValueError when the
        aggregate ranking has not been created yet, and KeyError when
        the alternative is unknown.
    Parameters:
        alt: the key that represents an alternative
    """
    if self.alts_to_ranks is None:
        raise ValueError("Aggregate ranking must be created first")
    if alt not in self.alts_to_ranks:
        raise KeyError("No alternative \"{}\" found in ".format(str(alt)) +
                       "the aggregate ranking")
    return self.alts_to_ranks[alt]
https://github.com/PrefPy/prefpy/blob/f395ba3782f05684fa5de0cece387a6da9391d02/prefpy/aggregate.py#L36-L53
fuzzy match ranking
python
def _get_match_and_classification(self, urls):
    """Get classification for all matching URLs.

    :param urls: a sequence of URLs to test
    :return: a tuple containing matching URL and classification
        string pertaining to it
    """
    # generator: yields only URLs the service did NOT classify as 'ok',
    # pairing each URL with its line in the response body
    for url_list, response in self._query(urls):
        classification_set = response.text.splitlines()
        for url, _class in zip(url_list, classification_set):
            if _class != 'ok':
                yield url, _class
https://github.com/piotr-rusin/spam-lists/blob/fd616e8761b28f3eaa503fee5e45f7748e8f88f2/spam_lists/clients.py#L256-L267
fuzzy match ranking
python
def matches(self, verbosity=Verbosity.TERSE):
    """Shows partial matches and activations.

    Returns a tuple containing the combined sum of the matches
    for each pattern, the combined sum of partial matches
    and the number of activations.

    The verbosity parameter controls how much to output:

      * Verbosity.VERBOSE: detailed matches are printed to stdout
      * Verbosity.SUCCINT: a brief description is printed to stdout
      * Verbosity.TERSE: (default) nothing is printed to stdout
    """
    # the CLIPS C API writes its result into a DataObject passed by reference
    data = clips.data.DataObject(self._env)
    lib.EnvMatches(self._env, self._rule, verbosity, data.byref)
    return tuple(data.value)
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/agenda.py#L269-L287
fuzzy match ranking
python
def findBest(self, pattern):
    """ Returns the *best* match in the region (instead of the first match) """
    findFailedRetry = True
    while findFailedRetry:
        best_match = None
        all_matches = self.findAll(pattern)
        # keep the highest-scoring match among all hits
        for match in all_matches:
            if best_match is None or best_match.getScore() < match.getScore():
                best_match = match
        self._lastMatch = best_match
        if best_match is not None:
            break
        path = pattern.path if isinstance(pattern, Pattern) else pattern
        # _raiseFindFailed returns truthy when the user opts to retry
        findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
        if findFailedRetry:
            time.sleep(self._repeatWaitTime)
    return best_match
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1352-L1368
fuzzy match ranking
python
def matchfirst(self, event): ''' Return first match for this event :param event: an input event ''' # 1. matches(self.index[ind], event) # 2. matches(self.any, event) # 3. self.matches if self.depth < len(event.indices): ind = event.indices[self.depth] if ind in self.index: m = self.index[ind].matchfirst(event) if m is not None: return m if hasattr(self, 'any'): m = self.any.matchfirst(event) if m is not None: return m if self._use_dict: for o, m in self.matchers_dict: if m is None or m.judge(event): return o else: for o, m in self.matchers_list: if m is None or m.judge(event): return o
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/matchtree.py#L166-L192
fuzzy match ranking
python
async def get_match(self, m_id, force_update=False) -> Match:
    """ get a single match by id

    |methcoro|

    Args:
        m_id: match id
        force_update (default=False): True to force an update to the Challonge API

    Returns:
        Match

    Raises:
        APIException
    """
    found_m = self._find_match(m_id)
    if force_update or found_m is None:
        # refresh the cached match list, then look again
        await self.get_matches()
        found_m = self._find_match(m_id)
    return found_m
https://github.com/fp12/achallonge/blob/25780b3c48b66400a50ff9f884e4287afd4c89e4/challonge/tournament.py#L654-L674
fuzzy match ranking
python
def form_query(query_type, query):
    """Build a ``multi_match`` query of the given type over the search fields.

    Fields that have a configured boost are rendered as ``name^boost``;
    all other fields are used as-is.
    """
    boosted_fields = []
    for name in SEARCH_FIELDS:
        if name in SEARCH_BOOSTS:
            boosted_fields.append("{}^{}".format(name, SEARCH_BOOSTS[name]))
        else:
            boosted_fields.append(name)
    return Q("multi_match", fields=boosted_fields, query=query, type=query_type)
https://github.com/pypa/warehouse/blob/396e77a0caf6efeccb5a5f86e2c8a27e575bf86d/warehouse/views.py#L455-L463
fuzzy match ranking
python
def get_search_score(query, choice, ignore_case=True, apply_regex=True, template='{}'):
    """Returns a tuple with the enriched text (if a template is provided)
    and a score for the match.

    Parameters
    ----------
    query : str
        String with letters to search in choice (in order of appearance).
    choice : str
        Sentence/words in which to search for the 'query' letters.
    ignore_case : bool, optional
        Optional value perform a case insensitive search (True by default).
    apply_regex : bool, optional
        Optional value (True by default) to perform a regex search. Useful
        when this function is called directly.
    template : str, optional
        Optional template string to surround letters found in choices.
        This is useful when using a rich text editor ('{}' by default).
        Examples: '<b>{}</b>', '<code>{}</code>', '<i>{}</i>'

    Returns
    -------
    results : tuple
        Tuples where the first item is the text (enriched if a template
        was used) and the second item is a search score.

    Notes
    -----
    The score is given according the following precedence (high to low):

    - Letters in one word and no spaces with exact match.
      Example: 'up' in 'up stroke'
    - Letters in one word and no spaces with partial match.
      Example: 'up' in 'upstream stroke'
    - Letters in one word but with skip letters.
      Example: 'cls' in 'close up'
    - Letters in two or more words
      Example: 'cls' in 'car lost'

    NOTE(review): the early exits return a 2-tuple ``(choice, score)``
    while the final return is a 3-tuple ``(choice, enriched_text,
    score)`` -- callers must cope with both shapes; confirm against
    callers.
    """
    original_choice = choice
    result = (original_choice, NOT_FOUND_SCORE)

    # Handle empty string case
    if not query:
        return result

    if ignore_case:
        query = query.lower()
        choice = choice.lower()

    if apply_regex:
        # Fast filter: only verify that the letters of `query` occur in
        # order inside `choice`; bail out early when they do not.
        pattern = get_search_regex(query, ignore_case=ignore_case)
        r = re.search(pattern, choice)
        if r is None:
            return result
        # NOTE(review): when the regex matches, execution falls through
        # to the final return with `enriched_text`/`score` unbound --
        # presumably direct callers only rely on the early-exit
        # filtering; confirm intended usage.
    else:
        sep = u'-'  # Matches will be replaced by this character
        let = u'x'  # Nonmatches (except spaces) will be replaced by this
        score = 0

        # Is the query an exact word of, or a substring of a word in, choice?
        exact_words = [query == word for word in choice.split(u' ')]
        partial_words = [query in word for word in choice.split(u' ')]

        if any(exact_words) or any(partial_words):
            # Whole-query match: mark the first occurrence, and reward
            # occurrences closer to the start (lower score is better).
            pos_start = choice.find(query)
            pos_end = pos_start + len(query)
            score += pos_start
            text = choice.replace(query, sep*len(query), 1)

            enriched_text = original_choice[:pos_start] +\
                template.format(original_choice[pos_start:pos_end]) +\
                original_choice[pos_end:]

        if any(exact_words):
            # Check if the query words exists in a word with exact match
            score += 1
        elif any(partial_words):
            # Check if the query words exists in a word with partial match
            score += 100
        else:
            # Check letter by letter
            text = [l for l in original_choice]
            if ignore_case:
                temp_text = [l.lower() for l in original_choice]
            else:
                temp_text = text[:]

            # Give points to start of string
            score += temp_text.index(query[0])

            # Find the query letters and replace them by `sep`, also apply
            # template as needed for enriching the letters in the text
            enriched_text = text[:]
            for char in query:
                if char != u'' and char in temp_text:
                    index = temp_text.index(char)
                    enriched_text[index] = template.format(text[index])
                    text[index] = sep
                    # Blank out everything up to and including the match
                    # so later letters can only match further right
                    # (enforces in-order matching).
                    temp_text = [u' ']*(index + 1) + temp_text[index+1:]

        enriched_text = u''.join(enriched_text)

        # Build a mask of the choice: `sep` where a query letter matched,
        # `let` for other non-space characters, spaces kept as-is.
        patterns_text = []
        for i, char in enumerate(text):
            if char != u' ' and char != sep:
                new_char = let
            else:
                new_char = char
            patterns_text.append(new_char)
        patterns_text = u''.join(patterns_text)
        # Penalize fragmented matches: fewer long runs of consecutive
        # matched letters pushes the score up (worse).
        for i in reversed(range(1, len(query) + 1)):
            score += (len(query) - patterns_text.count(sep*i))*100000

        # Penalize spaces and skipped letters lying between matched
        # fragments (leading/trailing segments are excluded).
        temp = patterns_text.split(sep)
        while u'' in temp:
            temp.remove(u'')
        if not patterns_text.startswith(sep):
            temp = temp[1:]
        if not patterns_text.endswith(sep):
            temp = temp[:-1]

        for pat in temp:
            score += pat.count(u' ')*10000
            score += pat.count(let)*100

    return original_choice, enriched_text, score
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/stringmatching.py#L50-L176
fuzzy match ranking
python
def query(self, query_text, n=10):
    """Return a list of n (score, docid) pairs for the best matches.

    Also handle the special syntax for 'learn: command'.
    """
    prefix = "learn:"
    if query_text.startswith(prefix):
        # Run the shell command and index its output as a new document.
        doctext = os.popen(query_text[len(prefix):], 'r').read()
        self.index_document(doctext, query_text)
        return []
    qwords = [w for w in words(query_text) if w not in self.stopwords]
    # Scan only the postings of the rarest query word -- presumably any
    # document worth ranking contains it.
    rarest = argmin(qwords, lambda w: len(self.index[w]))
    scored = []
    for d in self.index[rarest]:
        scored.append((sum(self.score(w, d) for w in qwords), d))
    # Sort ascending then reverse (matches the original tie ordering).
    scored.sort()
    scored.reverse()
    return scored[:n]
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/text.py#L126-L138
fuzzy match ranking
python
def ratio(fullname1, fullname2, strictness='default', options=None):
    """
    Score how closely two human names match, on a 0-100 scale.

    Uses difflib's sequence matching on a per-field basis for names.

    :param string fullname1: first human name
    :param string fullname2: second human name
    :param string strictness: strictness settings to use
    :param dict options: custom strictness settings updates
    :return int: sequence ratio match (out of 100)
    """
    settings = SETTINGS[strictness]
    if options is not None:
        # Never mutate the shared settings table.
        settings = deepcopy(settings)
        deep_update_dict(settings, options)

    return Name(fullname1).ratio_deep_compare(Name(fullname2), settings)
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/who.py#L31-L52
fuzzy match ranking
python
def compute_similarity_score(self, unit1, unit2):
    """ Returns the similarity score between two words.

    The type of similarity scoring method used depends on the currently
    active method and clustering type.

    :param unit1: Unit object corresponding to the first word.
    :type unit1: Unit
    :param unit2: Unit object corresponding to the second word.
    :type unit2: Unit
    :return: Number indicating degree of similarity of the two input
        words. The maximum value is 1, and a higher value indicates that
        the words are more similar.
    :rtype : Float

    The similarity method used depends both on the type of test being
    performed (SEMANTIC or PHONETIC) and the similarity method currently
    assigned to the self.current_similarity_measure property of the
    VFClustEngine object. The similarity measures used are the following:

    - PHONETIC/"phone": the phonetic similarity score (PSS) is
      calculated between the phonetic representations of the input
      units. It is equal to 1 minus the Levenshtein distance between
      two strings, normalized to the length of the longer string. The
      strings should be compact phonetic representations of the two
      words. (This method is a modification of a Levenshtein distance
      function available at
      http://hetland.org/coding/python/levenshtein.py.)
    - PHONETIC/"biphone": the binary common-biphone score (CBS) depends
      on whether two words share their initial and/or final biphone
      (i.e., set of two phonemes). A score of 1 indicates that two
      words have the same initial and/or final biphone; a score of 0
      indicates that two words have neither the same initial nor final
      biphone. This is also calculated using the phonetic
      representation of the two words.
    - SEMANTIC/"lsa": a semantic relatedness score (SRS) is calculated
      as the COSINE of the respective term vectors for the first and
      second word in an LSA space of the specified
      clustering_parameter. Unlike the PHONETIC methods, this method
      uses the .text property of the input Unit objects.
    """
    if self.type == "PHONETIC":
        word1 = unit1.phonetic_representation
        word2 = unit2.phonetic_representation
        if self.current_similarity_measure == "phone":
            # Levenshtein distance with two rolling rows.
            word1_length, word2_length = len(word1), len(word2)
            if word1_length > word2_length:
                # Make sure n <= m, to use O(min(n,m)) space
                word1, word2 = word2, word1
                word1_length, word2_length = word2_length, word1_length
            current = range(word1_length + 1)
            for i in range(1, word2_length + 1):
                previous, current = current, [i] + [0] * word1_length
                for j in range(1, word1_length + 1):
                    # Costs of insertion, deletion, and substitution.
                    add, delete = previous[j] + 1, current[j - 1] + 1
                    change = previous[j - 1]
                    if word1[j - 1] != word2[i - 1]:
                        change += 1
                    current[j] = min(add, delete, change)
            # Normalize by the longer string. NOTE(review): relies on
            # true division (Python 3 or a __future__ import); under
            # integer division this would collapse to 0 or 1.
            phonetic_similarity_score = 1 - current[word1_length] / word2_length
            return phonetic_similarity_score
        elif self.current_similarity_measure == "biphone":
            # 1 if the words share their first two or last two phonemes,
            # else 0.
            if word1[:2] == word2[:2] or word1[-2:] == word2[-2:]:
                common_biphone_score = 1
            else:
                common_biphone_score = 0
            return common_biphone_score
    elif self.type == "SEMANTIC":
        word1 = unit1.text
        word2 = unit2.text
        if self.current_similarity_measure == "lsa":
            # Cosine similarity between the two LSA term vectors,
            # computed in pure Python (equivalent to the commented-out
            # numpy expression below).
            w1_vec = self.term_vectors[word1]
            w2_vec = self.term_vectors[word2]
            # semantic_relatedness_score = (numpy.dot(w1_vec, w2_vec) /
            #                               numpy.linalg.norm(w1_vec) /
            #                               numpy.linalg.norm(w2_vec))
            dot = sum([w1*w2 for w1,w2 in zip(w1_vec, w2_vec)])
            norm1 = sqrt(sum([w*w for w in w1_vec]))
            norm2 = sqrt(sum([w*w for w in w2_vec]))
            semantic_relatedness_score = dot/(norm1 * norm2)
            return semantic_relatedness_score
        elif self.current_similarity_measure == "custom":
            # look it up in dict, trying both key orders
            try:
                similarity = self.custom_similarity_scores[(word1,word2)]
            except KeyError:
                try:
                    similarity = self.custom_similarity_scores[(word2,word1)]
                except KeyError:
                    if word1 == word2:
                        # if they're the same word, they pass. This
                        # should only happen when checking with
                        # non-adjacent words in the same cluster
                        return self.same_word_similarity
                    else:
                        # if words aren't found, they are defined as
                        # dissimilar
                        return 0
            return similarity
    # Unknown type/measure combination.
    return None
https://github.com/speechinformaticslab/vfclust/blob/7ca733dea4782c828024765726cce65de095d33c/vfclust/vfclust.py#L943-L1042