Columns: content (string, length 39 to 14.9k), sha1 (string, length 40), id (int64, 0 to 710k)
def check(lst: list, search_element: int) -> bool:
    """Check if the list contains the search_element."""
    return any([True for i in lst if i == search_element])
15f35ceff44e9fde28f577663e79a2216ffce148
709,135
from pathlib import Path

def data_dir(test_dir: Path) -> Path:
    """
    Create a directory for storing the mock data set.
    """
    _data_dir = test_dir / 'data'
    _data_dir.mkdir(exist_ok=True)
    return _data_dir
3b204816252a2c87698197a416a4e2de218f639d
709,136
def where_between(field_name, start_date, end_date):
    """
    Return the bit of query for the dates interval.
    """
    str = """
    {0} between date_format('{1}', '%%Y-%%c-%%d %%H:%%i:%%S')
        and date_format('{2}', '%%Y-%%c-%%d 23:%%i:%%S')
    """.format(
        field_name,
        start_date.strftime("%Y-%m-%d %H:%M:%S"),
        end_date.strftime("%Y-%m-%d %H:%M:%S"))
    return str
4801d01ac8743f138e7c558da40518b75ca6daed
709,145
import re

def sort_with_num(path):
    """Extract leading numbers in a file name for numerical sorting."""
    fname = path.name
    nums = re.match(r'^\d+', fname)
    if nums:
        return int(nums[0])
    else:
        return 0
2209384720c33b8201c06f7a14b431972712814a
709,148
import base64

def urlsafe_b64decode_nopadding(val):
    """Deal with unpadded urlsafe base64."""
    # Yes, it accepts extra = characters.
    return base64.urlsafe_b64decode(str(val) + '===')
22ed00b07e16b4b557dc46b5caeb9f7ce9513c0d
709,153
def _subimg_bbox(img, subimage, xc, yc):
    """
    Find the x/y bounding-box pixel coordinates in ``img`` needed to add
    ``subimage``, centered at ``(xc, yc)``, to ``img``.  Returns ``None``
    if the ``subimage`` would extend past the ``img`` boundary.
    """
    ys, xs = subimage.shape
    y, x = img.shape

    y0 = int(yc - (ys - 1) / 2.0)
    y1 = y0 + ys
    x0 = int(xc - (xs - 1) / 2.0)
    x1 = x0 + xs

    if (x0 >= 0) and (y0 >= 0) and (x1 < x) and (y1 < y):
        return (x0, x1, y0, y1)
    else:
        return None
b299a6b3726ced525b538b4fea45b235fc0bd56e
709,154
def fuzzy_lookup_item(name_or_id, lst):
    """Lookup an item by either name or id.

    Looking up by id is exact match. Looking up by name is by containment, and
    if the term is entirely lowercase then it's also case-insensitive.

    Multiple matches will throw an exception, unless one of them was an exact match.
    """
    try:
        idd = int(name_or_id)
        for val in lst:
            if val.id == idd:
                return val
        raise RuntimeError('Id %d not found!' % idd)
    except ValueError:
        insensitive = name_or_id.islower()
        matches = []
        for val in lst:
            name = val.name or ''
            if name_or_id == name:
                return val
            if insensitive:
                name = name.lower()
            if name_or_id in name:
                matches.append(val)
        if len(matches) == 1:
            return matches[0]
        if not matches:
            raise RuntimeError(f'No name containing {name_or_id!r} found!') from None
        raise RuntimeError(
            f'Multiple matches for {name_or_id!r}: {[x.name for x in matches]}') from None
604b3879d0f97822d5a36db6dcf468ef8eefaac9
709,157
def _flatten_value_to_list(batch_values):
    """Converts an N-D dense or sparse batch to a 1-D list."""
    # Ravel for flattening and tolist so that we go to native Python types
    # for more efficient followup processing.
    batch_value, = batch_values
    return batch_value.ravel().tolist()
77bfd9d32cbbf86a16a8da2701417a9ac9b9cc93
709,158
import tarfile

def extract_tarball(tarball, install_dir):
    """Extract tarball to a local path"""
    if not tarball.path.is_file():
        raise IOError(f"<info>{tarball.path}</info> is not a file!")

    try:
        with tarfile.open(tarball.path, "r:gz") as f_tarball:
            extraction_dir = [
                obj.name
                for obj in f_tarball.getmembers()
                if obj.isdir() and "/" not in obj.name
            ][0]
            f_tarball.extractall(install_dir)
    except tarfile.ReadError as exc:
        raise IOError(f"<info>{tarball.path}</info> is not a valid tarball!") from exc

    return install_dir / extraction_dir
da9deeb71da36c7c01611f3be7965a8c4a22dc41
709,160
def _VarintSize(value):
    """Compute the size of a varint value."""
    if value <= 0x7f: return 1
    if value <= 0x3fff: return 2
    if value <= 0x1fffff: return 3
    if value <= 0xfffffff: return 4
    if value <= 0x7ffffffff: return 5
    if value <= 0x3ffffffffff: return 6
    if value <= 0x1ffffffffffff: return 7
    if value <= 0xffffffffffffff: return 8
    if value <= 0x7fffffffffffffff: return 9
    return 10
4bd9b1c8d362f5e72e97f9f2c8e0d5711065291f
709,162
def is_android(builder_cfg):
    """Determine whether the given builder is an Android builder."""
    return ('Android' in builder_cfg.get('extra_config', '') or
            builder_cfg.get('os') == 'Android')
74b1620ba2f6fff46495174158f734c5aa8da372
709,163
def sentinel_id(vocabulary, return_value=None):
    """Token ID to use as a sentinel.

    By default, we use the last token in the vocabulary.

    Args:
      vocabulary: a t5.data.vocabularies.Vocabulary
      return_value: an optional integer
    Returns:
      an integer
    """
    if return_value is not None:
        return return_value
    return vocabulary.vocab_size - 1
08ad1116b7f41ba7070359675a0133f14b9917bd
709,168
from typing import Type

def is_dict_specifier(value):
    # type: (object) -> bool
    """ Check if value is a supported dictionary.

    Check if a parameter of the task decorator is a dictionary that specifies
    at least Type (and therefore can include things like Prefix, see binary
    decorator test for some examples).

    :param value: Decorator value to check.
    :return: True if value is a dictionary that specifies at least the Type of
             the key.
    """
    return isinstance(value, dict) and Type in value
e18ad83a1b79a8150dfda1c65f4ab7e72cc8c8c8
709,169
def parse_star_count(stars_str):
    """Parse strings like 40.3k and get the no. of stars as a number"""
    stars_str = stars_str.strip()
    return int(float(stars_str[:-1]) * 1000) if stars_str[-1] == 'k' else int(stars_str)
d47177f26656e6dc33d708a0c4824ff677f3387a
709,170
def with_color(text, color, bold=False):
    """
    Return a ZSH color-formatted string.

    Arguments
    ---------
    text: str
        text to be colored
    color: str
        ZSH color code
    bold: bool
        whether or not to make the text bold

    Returns
    -------
    str
        string with ZSH color-coded text
    """
    color_fmt = '$fg_bold[{:s}]' if bold else '$fg[{:s}]'
    return '%{{{:s}%}}{:s}%{{$reset_color%}}'.format(
        color_fmt.format(color), text)
40c194d9de76ab504a25592cfb13407cb089da0a
709,174
def transition_soil_carbon(area_final, carbon_final, depth_final,
                           transition_rate, year, area_initial,
                           carbon_initial, depth_initial):
    """This is the formula for calculating the transition of soil carbon

    .. math:: (af * cf * df) - \\frac{1}{(1 + tr)^y} * [(af * cf * df) - (ai * ci * di)]

    where

    * :math:`af` is area_final
    * :math:`cf` is carbon_final
    * :math:`df` is depth_final
    * :math:`tr` is transition_rate
    * :math:`y` is year
    * :math:`ai` is area_initial
    * :math:`ci` is carbon_initial
    * :math:`di` is depth_initial

    Args:
        area_final (float): The final area of the carbon
        carbon_final (float): The final amount of carbon per volume
        depth_final (float): The final depth of carbon
        transition_rate (float): The rate at which the transition occurs
        year (float): The amount of time in years over which the transition occurs
        area_initial (float): The initial area of the carbon
        carbon_initial (float): The initial amount of carbon per volume
        depth_initial (float): The initial depth of carbon

    Returns:
        float: Transition amount of soil carbon
    """
    return (area_final * carbon_final * depth_final) - \
        (1 / ((1 + transition_rate) ** year)) * \
        ((area_final * carbon_final * depth_final) -
         (area_initial * carbon_initial * depth_initial))
bfbf83f201eb8b8b0be0ec6a8722e850f6084e95
709,175
def split_data_set(data_set, axis, value):
    """
    Split the data set on a given feature, keeping only the rows whose value
    for that feature equals the given value. (Because the split has already
    been made on that feature, it is removed from the returned rows so it is
    not used again in later splits.)

    :param data_set: data set to split; each row is a list whose last element
                     is the label and the other elements are features
    :param axis: index of the feature to split on
    :param value: feature value to select (rows whose feature equals this value are kept)
    :return:

    >>> myDat = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
    >>> split_data_set(myDat, 0, 1)
    [[1, 'yes'], [1, 'yes'], [0, 'no']]
    >>> split_data_set(myDat, 0, 0)
    [[1, 'no'], [1, 'no']]
    """
    # Create a new list object
    ret_data_set = []
    for feature_vec in data_set:
        if feature_vec[axis] == value:
            # Drop the selected feature and build a new feature + label row
            reduced_feature_vec = feature_vec[:axis]
            reduced_feature_vec.extend(feature_vec[axis + 1:])
            ret_data_set.append(reduced_feature_vec)
    return ret_data_set
f90fdffee3bbee4b4477e371a9ed43094051126a
709,179
from typing import Any

def escape_parameter(value: Any) -> str:
    """
    Escape a query parameter.
    """
    if value == "*":
        return value

    if isinstance(value, str):
        value = value.replace("'", "''")
        return f"'{value}'"
    if isinstance(value, bytes):
        value = value.decode("utf-8")
        return f"'{value}'"
    if isinstance(value, bool):
        return "TRUE" if value else "FALSE"
    if isinstance(value, (int, float)):
        return str(value)
    return f"'{value}'"
00b706681b002a3226874f04e74acbb67d54d12e
709,186
def make_str_lst_unc_val(id, luv):
    """
    make_str_lst_unc_val(id, luv)

    Make a formatted string from an ID string and a list of uncertain values.

    Input
    -----
    id    A number or a string that will be output as a string.
    luv   A list of DTSA-II UncertainValue2 items. These will be printed
          as comma-delimited pairs with 6 digits following the decimal.

    Return
    ------
    A string with comma-delimited values with the ID and mean and uncertainty
    for each item in the list. This is suitable for writing output to a .csv file.

    Example:
    --------
    import dtsa2.jmGen as jmg
    import gov.nist.microanalysis.Utility as epu

    nmZnO1 = 40.1
    uvOKa1 = epu.UncertainValue2(0.269157, 0.000126)
    uvZnLa1 = epu.UncertainValue2(0.259251, 9.4e-05)
    uvSiKa1 = epu.UncertainValue2(0.654561, 8.4e-05)
    l_uvals = [uvOKa1, uvZnLa1, uvSiKa1]
    out = jmg.make_list_unc_val_string(nmZnO1, l_uvals)
    print(out)

    1> 40.1, 0.269157, 0.000126, 0.259251, 0.000094, 0.654561, 0.000084
    """
    lv = len(luv)
    i = 0
    rv = "%s, " % (id)
    for uv in luv:
        rc = round(uv.doubleValue(), 6)
        uc = round(uv.uncertainty(), 6)
        if i == lv - 1:
            rv += "%g, %.6f" % (rc, uc)
        else:
            rv += "%g, %.6f, " % (rc, uc)
        i += 1
    return(rv)
c65b9bb0c6539e21746a06f7a864acebc2bade03
709,189
import typing

def translate_null_strings_to_blanks(d: typing.Dict) -> typing.Dict:
    """Map over a dict and translate any null string values into ' '.

    Leave everything else as is. This is needed because you cannot add TableCell
    objects with only a null string or the client crashes.

    :param Dict d: dict of item values.
    :rtype Dict:
    """

    # Beware: locally defined function.
    def translate_nulls(s):
        if s == "":
            return " "
        return s

    new_d = {k: translate_nulls(v) for k, v in d.items()}
    return new_d
1a6cfe2f8449d042eb01774054cddde08ba56f8c
709,190
import time

def timer(func):
    """ Decorator to measure execution time """
    def wrapper(*args, **kwargs):
        start_time = time.time()
        ret = func(*args, **kwargs)
        elapsed = time.time() - start_time
        print('{:s}: {:4f} sec'.format(func.__name__, elapsed))
        return ret
    return wrapper
0f6a8a4dc8eff1aa49efaf5d26ac46e0cc483b3e
709,192
import uuid

def _create_keyword_plan_campaign(client, customer_id, keyword_plan):
    """Adds a keyword plan campaign to the given keyword plan.

    Args:
        client: An initialized instance of GoogleAdsClient
        customer_id: A str of the customer_id to use in requests.
        keyword_plan: A str of the keyword plan resource_name this keyword plan
            campaign should be attributed to.create_keyword_plan.

    Returns:
        A str of the resource_name for the newly created keyword plan campaign.

    Raises:
        GoogleAdsException: If an error is returned from the API.
    """
    keyword_plan_campaign_service = client.get_service(
        "KeywordPlanCampaignService"
    )
    operation = client.get_type("KeywordPlanCampaignOperation")
    keyword_plan_campaign = operation.create

    keyword_plan_campaign.name = f"Keyword plan campaign {uuid.uuid4()}"
    keyword_plan_campaign.cpc_bid_micros = 1000000
    keyword_plan_campaign.keyword_plan = keyword_plan

    network = client.enums.KeywordPlanNetworkEnum.GOOGLE_SEARCH
    keyword_plan_campaign.keyword_plan_network = network

    geo_target = client.get_type("KeywordPlanGeoTarget")
    # Constant for U.S. Other geo target constants can be referenced here:
    # https://developers.google.com/google-ads/api/reference/data/geotargets
    geo_target.geo_target_constant = "geoTargetConstants/2840"
    keyword_plan_campaign.geo_targets.append(geo_target)

    # Constant for English
    language = "languageConstants/1000"
    keyword_plan_campaign.language_constants.append(language)

    response = keyword_plan_campaign_service.mutate_keyword_plan_campaigns(
        customer_id=customer_id, operations=[operation]
    )

    resource_name = response.results[0].resource_name

    print(f"Created keyword plan campaign with resource name: {resource_name}")

    return resource_name
b6ce2ee2ec40e1192461c41941f18fe04f901344
709,193
def is_hermitian(mx, tol=1e-9):
    """
    Test whether mx is a hermitian matrix.

    Parameters
    ----------
    mx : numpy array
        Matrix to test.

    tol : float, optional
        Tolerance on absolute magnitude of elements.

    Returns
    -------
    bool
        True if mx is hermitian, otherwise False.
    """
    (m, n) = mx.shape
    for i in range(m):
        if abs(mx[i, i].imag) > tol:
            return False
        for j in range(i + 1, n):
            if abs(mx[i, j] - mx[j, i].conjugate()) > tol:
                return False
    return True
31e9a1faff21707b2fc44c7824bb05fc85967f00
709,194
from typing import Any

def get_object_unique_name(obj: Any) -> str:
    """Return a unique string associated with the given object.

    That string is constructed as follows: <object class name>_<object_hex_id>
    """
    return f"{type(obj).__name__}_{hex(id(obj))}"
f817abf636673f7ef6704cbe0ff5a7a2b897a3f6
709,195
def recurse_while(predicate, f, *args):
    """
    Accumulate value by executing recursively function `f`.

    The function `f` is executed with starting arguments. While the predicate
    for the result is true, the result is fed into function `f`.

    If predicate is never true then starting arguments are returned.

    :param predicate: Predicate function guarding execution.
    :param f: Function to execute.
    :param *args: Starting arguments.
    """
    result = f(*args)
    result = result if type(result) == tuple else (result, )

    while predicate(*result):
        args = result  # predicate(args) is always true
        result = f(*args)
        result = result if type(result) == tuple else (result, )

    return args if len(args) > 1 else args[0]
fd3313760c246336519a2e89281cc94a2bee6833
709,198
def total_allocation_constraint(weight, allocation: float, upper_bound: bool = True):
    """
    Used for inequality constraint for the total allocation.

    :param weight: np.array
    :param allocation: float
    :param upper_bound: bool
        if true the constraint is from above (sum of weights <= allocation),
        else from below (sum of weights >= allocation)
    :return: np.array
    """
    if upper_bound:
        return allocation - weight.sum()
    else:
        return weight.sum() - allocation
b92c4bd18d1c6246ff202987c957a5098fd66ba1
709,199
import torch

def conv(input, weight):
    """
    Returns the convolution of input and weight tensors,
    where input contains sequential data.
    The convolution is along the sequence axis.
    input is of size [batchSize, inputDim, seqLength]
    """
    output = torch.nn.functional.conv1d(input=input, weight=weight)
    return output
e213be11c423ff63a1ebffda55331298fcf53443
709,203
def _normalize_handler_method(method):
    """Transforms an HTTP method into a valid Python identifier."""
    return method.lower().replace("-", "_")
aad23dba304ba39708e4415de40019479ccf0195
709,205
def replace_cipd_revision(file_path, old_revision, new_revision):
    """Replaces cipd revision strings in file.

    Args:
        file_path: Path to file.
        old_revision: Old cipd revision to be replaced.
        new_revision: New cipd revision to use as replacement.

    Returns:
        Number of replaced occurrences.

    Raises:
        IOError: If no occurrences were found.
    """
    with open(file_path) as f:
        contents = f.read()
    num = contents.count(old_revision)
    if not num:
        raise IOError('Did not find old CIPD revision {} in {}'.format(
            old_revision, file_path))
    newcontents = contents.replace(old_revision, new_revision)
    with open(file_path, 'w') as f:
        f.write(newcontents)
    return num
f429e74f0dd7180ab4bf90d662f8042b958b81f8
709,211
def escape_cdata(cdata):
    """Escape a string for an XML CDATA section"""
    return cdata.replace(']]>', ']]>]]&gt;<![CDATA[')
c38b934b4c357e8c15fd1f3942f84ca3aaab4ee1
709,213
def _strip_unbalanced_punctuation(text, is_open_char, is_close_char):
    """Remove unbalanced punctuation (e.g parentheses or quotes) from text.

    Removes each opening punctuation character for which it can't find
    corresponding closing character, and vice versa. It can only handle one
    type of punctuation (e.g. it could strip quotes or parentheses but not
    both). It takes functions (is_open_char, is_close_char), instead of the
    characters themselves, so that we can determine from nearby characters
    whether a straight quote is an opening or closing quote.

    Args:
        text (string): the text to fix
        is_open_char: a function that accepts the text and an index,
            and returns true if the character at that index is
            an opening punctuation mark.
        is_close_char: same as is_open_char for closing punctuation mark.

    Returns:
        The text with unmatched punctuation removed.
    """
    # lists of unmatched opening and closing characters
    opening_chars = []
    unmatched_closing_chars = []
    for idx, c in enumerate(text):
        if is_open_char(text, idx):
            opening_chars.append(idx)
        elif is_close_char(text, idx):
            if opening_chars:
                # this matches a character we found earlier
                opening_chars.pop()
            else:
                # this doesn't match any opening character
                unmatched_closing_chars.append(idx)
    char_indices = [i for (i, _) in enumerate(text)
                    if not (i in opening_chars or i in unmatched_closing_chars)]
    stripped_text = "".join([text[i] for i in char_indices])
    return stripped_text
db4b8f201e7b01922e6c06086594a8b73677e2a2
709,214
def get_min_max_value(dfg):
    """
    Gets min and max value assigned to edges in DFG graph

    Parameters
    -----------
    dfg
        Directly follows graph

    Returns
    -----------
    min_value
        Minimum value in directly follows graph
    max_value
        Maximum value in directly follows graph
    """
    min_value = 9999999999
    max_value = -1

    for edge in dfg:
        if dfg[edge] < min_value:
            min_value = dfg[edge]
        if dfg[edge] > max_value:
            max_value = dfg[edge]

    return min_value, max_value
17a98350f4e13ec51e72d4357e142ad661e57f54
709,215
import time

def fmt_time(timestamp):
    """Return ISO formatted time from seconds from epoch."""
    if timestamp:
        return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(timestamp))
    else:
        return '-'
c87f1da7b6a3b1b8d8daf7d85a2b0746be58133b
709,216
from typing import Tuple

def break_word_by_trailing_integer(pname_fid: str) -> Tuple[str, str]:
    """
    Splits a word that has a value that is an integer

    Parameters
    ----------
    pname_fid : str
        the DVPRELx term (e.g., A(11), NSM(5))

    Returns
    -------
    word : str
        the value not in parentheses
    value : int
        the value in parentheses

    Examples
    --------
    >>> break_word_by_trailing_integer('T11')
    ('T', '11')
    >>> break_word_by_trailing_integer('THETA11')
    ('THETA', '11')
    """
    nums = []
    i = 0
    for i, letter in enumerate(reversed(pname_fid)):
        if letter.isdigit():
            nums.append(letter)
        else:
            break

    num = ''.join(nums[::-1])
    if not num:
        msg = ("pname_fid=%r does not follow the form 'T1', 'T11', 'THETA42' "
               "(letters and a number)" % pname_fid)
        raise SyntaxError(msg)
    word = pname_fid[:-i]
    assert len(word) + len(num) == len(pname_fid), 'word=%r num=%r pname_fid=%r' % (word, num, pname_fid)
    return word, num
e9b9c85b4225269c94918ce1cc2e746d3c74aa5c
709,217
def _get_unique_barcode_ids(pb_index, isoseq_mode=False):
    """
    Get a list of sorted, unique fw/rev barcode indices from an index object.
    """
    bc_sel = (pb_index.bcForward != -1) & (pb_index.bcReverse != -1)
    bcFw = pb_index.bcForward[bc_sel]
    bcRev = pb_index.bcReverse[bc_sel]
    bc_ids = sorted(list(set(zip(bcFw, bcRev))))
    if isoseq_mode:
        bc_ids = sorted(list(set([tuple(sorted(bc)) for bc in bc_ids])))
    return bc_ids
bdfb386d26415a7b3f9f16661d83a38a63958ad0
709,219
def argMax(scores):
    """
    Returns the key with the highest value.
    """
    if len(scores) == 0:
        return None
    # list() is needed so the items view can be indexed in Python 3.
    all = list(scores.items())
    values = [x[1] for x in all]
    maxIndex = values.index(max(values))
    return all[maxIndex][0]
9310988a0f8aa1279882d060ade7febdc102b0c5
709,227
def get_ratio(numerator, denominator):
    """Get ratio from numerator and denominator."""
    return (
        0 if not denominator
        else round(float(numerator or 0) / float(denominator), 2)
    )
e51a860292d54d2e44909ad878d0b1d8e66c37c2
709,228
def irrf(valor=0):
    """
    -> Function to compute the IRRF (Brazilian withholding income tax).

    :param valor: Base salary value used to compute the IRRF.
    :return: Returns the IRRF value and the rate (aliquota) that was applied.
    """
    irrf = []
    if valor < 1903.99:
        irrf.append(0)
        irrf.append(0)
    elif valor >= 1903.99 and valor <= 2826.65:
        irrf.append((valor * 7.5) / 100 - 142.80)   # 7.5% rate, minus the deduction amount.
        irrf.append('7,5')
    elif valor >= 2826.66 and valor <= 3751.05:
        irrf.append((valor * 15) / 100 - 354.80)    # 15% rate, minus the deduction amount.
        irrf.append('15')
    elif valor >= 3751.06 and valor <= 4664.68:
        irrf.append((valor * 22.5) / 100 - 636.13)  # 22.5% rate, minus the deduction amount.
        irrf.append('22,5')
    elif valor > 4664.68:
        irrf.append((valor * 27.5) / 100 - 869.36)  # 27.5% rate, minus the deduction amount.
        irrf.append('27,5')
    return irrf
53646b770b2c2359e1e8c4f725b27396cc972050
709,229
def get_reddit_slug(permalink):
    """
    Get the reddit slug from a submission permalink, with '_' replaced by '-'

    Args:
        permalink (str): reddit submission permalink

    Returns:
        str: the reddit slug for a submission
    """
    return list(filter(None, permalink.split("/")))[-1].replace("_", "-")
587239a0b7bbd88e10d49985dd6ebfd3768038d8
709,233
import torch

def get_adjacent_th(spec: torch.Tensor, filter_length: int = 5) -> torch.Tensor:
    """Zero-pad and unfold stft, i.e.,
    add zeros to the beginning so that, using the multi-frame signal model,
    there will be as many output frames as input frames.

    Args:
        spec (torch.Tensor): input spectrum (B, F, T, 2)
        filter_length (int): length for frame extension
    Returns:
        ret (torch.Tensor): output spectrum (B, F, T, filter_length, 2)
    """  # noqa: D400
    return (
        torch.nn.functional.pad(spec, pad=[0, 0, filter_length - 1, 0])
        .unfold(dimension=-2, size=filter_length, step=1)
        .transpose(-2, -1)
        .contiguous()
    )
4009b41fd4e729e16c749f4893f61b61ca922215
709,234
def K2(eps):
    """
    Radar dielectric factor |K|**2

    Parameters
    ----------
    eps : complex
        nd array of complex relative dielectric constants

    Returns
    -------
    nd - float
        Radar dielectric factor |K|**2 real
    """
    K_complex = (eps - 1.0) / (eps + 2.0)
    return (K_complex * K_complex.conj()).real
8754bee38a46de14d205764c4843cad7c4d5d88f
709,235
import torch

def projection_from_Rt(rmat, tvec):
    """
    Compute the projection matrix from Rotation and translation.
    """
    assert len(rmat.shape) >= 2 and rmat.shape[-2:] == (3, 3), rmat.shape
    assert len(tvec.shape) >= 2 and tvec.shape[-2:] == (3, 1), tvec.shape

    return torch.cat([rmat, tvec], dim=-1)
90039ba7002be31d347b7793d542b1ff37abae3e
709,236
def threshold_abs(image, threshold):
    """Return thresholded image from an absolute cutoff."""
    return image > threshold
5032f632371af37e81c3ebcc587475422d5ff2bf
709,241
def list_manipulation(lst, command, location, value=None):
    """Mutate lst to add/remove from beginning or end.

    - lst: list of values
    - command: command, either "remove" or "add"
    - location: location to remove/add, either "beginning" or "end"
    - value: when adding, value to add

    remove: remove item at beginning or end, and return item removed

        >>> lst = [1, 2, 3]
        >>> list_manipulation(lst, 'remove', 'end')
        3
        >>> list_manipulation(lst, 'remove', 'beginning')
        1
        >>> lst
        [2]

    add: add item at beginning/end, and return list

        >>> lst = [1, 2, 3]
        >>> list_manipulation(lst, 'add', 'beginning', 20)
        [20, 1, 2, 3]
        >>> list_manipulation(lst, 'add', 'end', 30)
        [20, 1, 2, 3, 30]
        >>> lst
        [20, 1, 2, 3, 30]

    Invalid commands or locations should return None:

        >>> list_manipulation(lst, 'foo', 'end') is None
        True
        >>> list_manipulation(lst, 'add', 'dunno') is None
        True
    """
    if command == "remove":
        if location == "end":
            return lst.pop()
        elif location == "beginning":
            return lst.pop(0)
    elif command == "add":
        if location == "beginning":
            lst.insert(0, value)
            return lst
        elif location == "end":
            lst.append(value)
            return lst
c847257ea5508f60b84282c3ac8237b43cd3825a
709,243
import collections

def _get_ngrams(segment, max_order):
    """Extracts all n-grams upto a given maximum order from an input segment.

    Args:
        segment: text segment from which n-grams will be extracted.
        max_order: maximum length in tokens of the n-grams returned by this
            methods.

    Returns:
        The Counter containing all n-grams upto max_order in segment
        with a count of how many times each n-gram occurred.
    """
    ngram_counts = collections.Counter()
    for order in range(1, max_order + 1):
        for i in range(0, len(segment) - order + 1):
            ngram = tuple(segment[i:i + order])
            ngram_counts[ngram] += 1
    return ngram_counts
561dfe8c18810ce40ce4c0ff391d6838816de116
709,248
import re

def find_version(infile):
    """
    Given an open file (or some other iterator of lines) holding a
    configure.ac file, find the current version line.
    """
    for line in infile:
        m = re.search(r'AC_INIT\(\[tor\],\s*\[([^\]]*)\]\)', line)
        if m:
            return m.group(1)
    return None
35ac18757ee1156f046bbd9ffa68ed4898bc317a
709,250
def int_array_to_hex(iv_array):
    """ Converts an integer array to a hex string. """
    iv_hex = ''
    for b in iv_array:
        iv_hex += '{:02x}'.format(b)
    return iv_hex
f3332b7672a266ad9cae9fc52bc8e1152bcee58b
709,254
def scale(a: tuple, scalar: float) -> tuple:
    """Scales the point."""
    return a[0] * scalar, a[1] * scalar
9638b8cfbd792c2deb35da304c5c375e0402404e
709,256
def timedelta_to_seconds(ts):
    """
    Convert the TimedeltaIndex of a pandas.Series into a numpy
    array of seconds.
    """
    seconds = ts.index.values.astype(float)
    seconds -= seconds[-1]
    seconds /= 1e9
    return seconds
4565d7a691e8ac004d9d529568db0d032a56d088
709,260
def parse_gage(s):
    """Parse a streamgage key-value pair.

    Parse a streamgage key-value pair, separated by '='; that's the reverse of ShellArgs.

    On the command line (argparse) a declaration will typically look like::

        foo=hello
        or
        foo="hello world"

    :param s: str
    :rtype: tuple(key, value)
    """
    # Adapted from: https://gist.github.com/fralau/061a4f6c13251367ef1d9a9a99fb3e8d
    items = s.split('=')
    key = items[0].strip()  # we remove blanks around keys, as is logical
    value = ''
    if len(items) > 1:
        # rejoin the rest:
        value = '='.join(items[1:])
    return key, value
299b47f3a4757c924620bdc05e74f195a4cb7967
709,261
def calc_internal_hours(entries):
    """
    Calculates internal utilizable hours from an array of entry dictionaries
    """
    internal_hours = 0.0
    for entry in entries:
        if entry['project_name'][:22] == "TTS Acq / Internal Acq" and not entry['billable']:
            internal_hours = internal_hours + float(entry['hours_spent'])
    return internal_hours
0962ee49f60ac296668294e6d2f075ce981cbc55
709,262
def callable_or_raise(obj):
    """Check that an object is callable, else raise a :exc:`ValueError`."""
    if not callable(obj):
        raise ValueError('Object {0!r} is not callable.'.format(obj))
    return obj
cb6dd8c03ea41bb94a8357553b3f3998ffcc0d65
709,264
from functools import reduce

def min_column_widths(rows):
    """Computes the minimum column width for the table of strings.

    >>> min_column_widths([["some", "fields"], ["other", "line"]])
    [5, 6]
    """
    def lengths(row):
        return map(len, row)

    def maximums(row1, row2):
        return map(max, row1, row2)

    # list() materializes the map object so the doctest output matches in Python 3.
    return list(reduce(maximums, map(lengths, rows)))
36722e4250dde561836c1ea3042b796ed7650986
709,267
import json

def parse_json(json_path):
    """
    Parse training params json file to python dictionary

    :param json_path: path to training params json file
    :return: python dict
    """
    with open(json_path) as f:
        d = json.load(f)
    return d
c34b241813996a8245ea8c334de72f0fbffe8a31
709,268
def expand_not(tweets):
    """
    DESCRIPTION:
        In informal speech, which is widely used in social media, it is common
        to use contractions of words (e.g., don't instead of do not).
        This may result in misinterpreting the meaning of a phrase especially
        in the case of negations. This function expands these contractions and
        other similar ones (e.g it's --> it is etc...).
    INPUT:
        tweets: Series of a set of tweets as a python strings
    OUTPUT:
        Series of filtered tweets
    """
    tweets = tweets.str.replace('n\'t', ' not', case=False)
    tweets = tweets.str.replace('i\'m', 'i am', case=False)
    tweets = tweets.str.replace('\'re', ' are', case=False)
    tweets = tweets.str.replace('it\'s', 'it is', case=False)
    tweets = tweets.str.replace('that\'s', 'that is', case=False)
    tweets = tweets.str.replace('\'ll', ' will', case=False)
    tweets = tweets.str.replace('\'l', ' will', case=False)
    tweets = tweets.str.replace('\'ve', ' have', case=False)
    tweets = tweets.str.replace('\'d', ' would', case=False)
    tweets = tweets.str.replace('he\'s', 'he is', case=False)
    tweets = tweets.str.replace('what\'s', 'what is', case=False)
    tweets = tweets.str.replace('who\'s', 'who is', case=False)
    tweets = tweets.str.replace('\'s', '', case=False)

    for punct in ['!', '?', '.']:
        regex = "(\\" + punct + "( *)){2,}"
        tweets = tweets.str.replace(regex, punct + ' <repeat> ', case=False)

    return tweets
66f4ed5c7321fe7bf5ea0d350980394a235d99e6
709,271
def _to_original(sequence, result):
    """
    Cast result into the same type

    >>> _to_original([], ())
    []
    >>> _to_original((), [])
    ()
    """
    if isinstance(sequence, tuple):
        return tuple(result)
    if isinstance(sequence, list):
        return list(result)
    return result
7b9d8d1d2b119d61b43dde253d8d3c48bd0e45b8
709,274
def bytes_to_unicode_records(byte_string, delimiter, encoding):
    """ Convert a byte string to a tuple containing an array of unicode
        records and any remainder to be used as a prefix next time. """
    string = byte_string.decode(encoding)
    records = string.split(delimiter)
    return (records[:-1], records[-1].encode(encoding))
ccc3591551a6b316843cc8eafb33e45627eac752
709,278
def remove_key(d, key):
    """Safely remove the `key` from the dictionary.

    Safely remove the `key` from the dictionary `d` by first
    making a copy of dictionary. Return the new dictionary together
    with the value stored for the `key`.

    Parameters
    ----------
    d : dict
        The dictionary from which to remove the `key`.
    key :
        The key to remove

    Returns
    -------
    v :
        The value for the key
    r : dict
        The dictionary with the key removed.
    """
    r = dict(d)
    v = r[key]
    del r[key]
    return v, r
5695b18675b52f4ca8bc3cba1ed0104425e7a04f
709,285
def is_reviewer(user):
    """Return True if this user is a financial aid reviewer"""
    # no need to cache here, all the DB lookups used during has_perm
    # are already cached
    return user.has_perm("finaid.review_financial_aid")
e3c599f78eb51c33ab48e3760c0f2965ba305916
709,288
def isnonempty(value):
    """
    Return whether the value is not empty

    Examples::

        >>> isnonempty('a')
        True
        >>> isnonempty('')
        False

    :param value: string to validate whether value is not empty
    """
    return value != ''
0250cb455d8f77027d5cde9101a24683950bbdb2
709,289
def company(anon, obj, field, val):
    """
    Generates a random company name
    """
    return anon.faker.company(field=field)
95580147817a37542f75e2c728941a159cd30bd3
709,292
def jump(inst_ptr, program, direction):
    """Jump the instruction pointer in the program until matching bracket"""
    count = direction
    while count != 0:
        inst_ptr += direction
        char = program[inst_ptr]
        if char == '[':
            count += 1
        elif char == ']':
            count -= 1
        else:
            pass
    return inst_ptr
76c6c4dcf4dbc452e9f2b252522871fcca95c75d
709,297
def remove_duplicates_from_list(params_list):
    """
    Common function to remove duplicates from a list
    Author: Chaitanya-vella.kumar@broadcom.com
    :param params_list:
    :return:
    """
    if params_list:
        return list(dict.fromkeys(params_list))
    return list()
885b2e048ec672bd2d24fabe25066bc2df3ea8a8
709,299
import math

def get_distance_metres(aLocation1, aLocation2):
    """
    Returns the ground distance in metres between two LocationGlobal objects

    :param aLocation1: starting location
    :param aLocation2: ending location
    :return:
    """
    dlat = aLocation2.lat - aLocation1.lat
    dlong = aLocation2.lon - aLocation1.lon
    dlong_c = dlong * math.cos(math.radians(aLocation1.lat))
    return math.sqrt((dlat * dlat) + (dlong_c * dlong_c)) * 1.113195e5
5f1428c099f79ba8b41177f87e6a3bffed13e00b
709,300
def GetCLInfo(cl_info_str):
    """Gets CL's repo_name and revision."""
    return cl_info_str.split('/')
d077216b2804c249a7d0ffdbff7f992dde106501
709,301
import torch

def color2position(C, min=None, max=None):
    """
    Converts the input points set into colors

    Parameters
    ----------
    C : Tensor
        the input color tensor
    min : float (optional)
        the minimum value for the points set. If None it will be set to -1 (default is None)
    max : float (optional)
        the maximum value for the points set. If None it will be set to +1 (default is None)

    Returns
    -------
    Tensor
        the points set tensor
    """
    if min is None:
        min = -1
    if max is None:
        max = 1
    return torch.add(torch.mul(C, max - min), min)
809d8cfd6f24e6abb6d65d5b576cc0b0ccbc3fdf
709,306
def parse_dotted_path(path):
    """
    Extracts attribute name from dotted path.
    """
    try:
        objects, attr = path.rsplit('.', 1)
    except ValueError:
        objects = None
        attr = path

    return objects, attr
4685fad6461286b957a8d0056df2146fdd0f2e55
709,307
def simple_url_formatter(endpoint, url):
    """
    A simple URL formatter to use when no application context
    is available.

    :param str endpoint: the endpoint to use.
    :param str url: the URL to format
    """
    return u"/{}".format(url)
74f3e68fe10f7cc6bf8bfe81a7349a995bb79fa3
709,310
def determine_nohit_score(cons, invert):
    """
    Determine the value in the matrix assigned to nohit given SeqFindr options

    :param cons: whether the SeqFindr run is using mapping consensus data or not
    :param invert: whether the SeqFindr run is inverting (missing hits to
                   be shown as black bars)

    :type cons: None or boolean
    :type invert: boolean

    :returns: the value defined as no hit in the results matrix
    """
    if cons is None:
        nohit = 0.5
    else:
        nohit = 1.0
    if invert:
        nohit = nohit * -1.0
    return nohit
d0539b5ac4dda8b4a15c6800fb4a821cb305b319
709,324
import torch

def ln_addTH(x: torch.Tensor, beta: torch.Tensor) -> torch.Tensor:
    """
    out = x + beta[None, :, None]
    """
    return x + beta[None, :, None]
77e556c41a33a8c941826604b4b595ea7d456f9a
709,326
def list_books(books):
    """Creates a string that, on each line, informs about a book."""
    return '\n'.join([f'+ {book.name}: {book.renew_count}: {book.return_date}'
                      for book in books])
fce770a39def7f40ed12820a578b4e327df7da43
709,328
def format_pvalue(p_value, alpha=0.05, include_equal=True):
    """
    If p-value is lower than 0.05, change it to "<0.05", otherwise, round it to two decimals

    :param p_val: input p-value as a float
    :param alpha: significance level
    :param include_equal: include equal sign ('=') to pvalue (e.g., '=0.06') or not (e.g., '0.06')
    :return: p_val: processed p-value (replaced by "<0.05" or rounded to two decimals) as a str
    """
    if p_value < alpha:
        p_value = "<" + str(alpha)
    else:
        if include_equal:
            p_value = '=' + str(round(p_value, 3))
        else:
            p_value = str(round(p_value, 3))

    return p_value
aa6506b14b68746f4fa58d951f246321e8b5a627
709,329
def genmatrix(list, combinfunc, symmetric=False, diagonal=None):
    """
    Takes a list and generates a 2D-matrix using the supplied combination
    function to calculate the values.

    PARAMETERS
        list        - the list of items
        combinfunc  - the function that is used to calculate the value in a
                      cell. It has to cope with two arguments.
        symmetric   - Whether it will be a symmetric matrix along the diagonal.
                      For example, if the list contains integers, and the
                      combination function is abs(x-y), then the matrix will
                      be symmetric.
                      Default: False
        diagonal    - The value to be put into the diagonal. For some
                      functions, the diagonal will stay constant. An example
                      could be the function "x-y". Then each diagonal cell
                      will be "0". If this value is set to None, then the
                      diagonal will be calculated.
                      Default: None
    """
    matrix = []
    row_index = 0
    for item in list:
        row = []
        col_index = 0
        for item2 in list:
            if diagonal is not None and col_index == row_index:
                # if this is a cell on the diagonal
                row.append(diagonal)
            elif symmetric and col_index < row_index:
                # if the matrix is symmetric and we are "in the lower left triangle"
                row.append(matrix[col_index][row_index])
            else:
                # if this cell is not on the diagonal
                row.append(combinfunc(item, item2))
            col_index += 1
        matrix.append(row)
        row_index += 1
    return matrix
b7d8ebc916f57621a20c371139162cb0504470cd
709,330
import sqlite3

def initialize_database() -> sqlite3.Connection:
    """Create a sqlite3 database with three tables to hold users, records and
    history. Returns the connection to the created database."""
    with sqlite3.connect("bank_buds.db") as conn:
        conn.execute("""CREATE TABLE IF NOT EXISTS user(
            customer_id TEXT NOT NULL,
            firstName TEXT NOT NULL,
            lastName TEXT NOT NULL,
            userName TEXT NOT NULL,
            userPass TEXT NOT NULL,
            balance INTEGER NOT NULL)""")
        conn.execute("""CREATE TABLE IF NOT EXISTS user_record(
            rec_id TEXT REFERENCES user NOT NULL,
            wins INTEGER NOT NULL,
            losses INTEGER NOT NULL)""")
        conn.execute("""CREATE TABLE IF NOT EXISTS challenge_history(
            challenge_id INTEGER NOT NULL,
            challenge_starter TEXT REFERENCES user NOT NULL,
            challenge_opponent TEXT REFERENCES user NOT NULL,
            challenge_winner TEXT REFERENCES user NOT NULL,
            challenge_loser TEXT REFERENCES user NOT NULL,
            is_active INTEGER NOT NULL,
            goal INTEGER NOT NULL)""")
        return conn
c3e32534de39a53686672c5c537a2c277fa2d06d
709,334
def build_headers(access_token, client_id):
    """
    :param access_token: Access token granted when the user links their account
    :param client_id: This is the api key for your own app
    :return: Dict of headers
    """
    return {'Content-Type': 'application/json',
            'Authorization': f'Bearer {access_token}',
            'trakt-api-version': '2',
            'trakt-api-key': client_id}
5cd8ae3e06f67b7a4fdb1644ae82c62cb54479cb
709,337
def values(series):
    """Count the values and sort.

    series: pd.Series

    returns: series mapping from values to frequencies
    """
    return series.value_counts(dropna=False).sort_index()
d4ef6b93b7f2790d8130ac045e9c315b8d57a245
709,341
def _get_window_size(offset, step_size, image_size):
    """
    Calculate window width or height. Usually same as block size, except when
    at the end of image and only a fraction of block size remains

    :param offset: start column / row
    :param step_size: block width / height
    :param image_size: image width / height
    :return: window width / height
    """
    if offset + step_size > image_size:
        return image_size - offset
    else:
        return step_size
90d65229c54a5878fa9b2af8e30293e743679e42
709,343
def sturm_liouville_function(x, y, p, p_x, q, f, alpha=0, nonlinear_exp=2):
    """Second order Sturm-Liouville Function defining y'' for Lu=f.

    This form is used because it is expected for Scipy's solve_ivp method.

    Keyword arguments:
    x -- independent variable
    y -- dependent variable
    p -- p(x) parameter
    p_x -- derivative of p_x wrt x
    q -- q(x) parameter
    f -- forcing function f(x)
    alpha -- nonlinear parameter
    nonlinear_exp -- exponent of nonlinear term
    """
    y_x = y[1]
    y_xx = -1 * (p_x / p) * y[1] + (q / p) * y[0] + (q / p) * alpha * y[0] ** nonlinear_exp - f / p
    return [y_x, y_xx]
5c34cc622075c640fe2dec03b1ae302192d0f779
709,344
def is_private(key):
    """
    Returns whether or not an attribute is private.
    A private attribute looks like: __private_attribute__.

    :param key: The attribute key
    :return: bool
    """
    return key.startswith("__") and key.endswith("__")
498e7522e95317dbb171961f0f5fe8350c29a69d
709,345
def _channel_name(row, prefix="", suffix=""):
    """Formats a usable name for the repeater."""
    length = 16 - len(prefix)
    name = prefix + " ".join((row["CALL"], row["CITY"]))[:length]
    if suffix:
        length = 16 - len(suffix)
        name = ("{:%d.%d}" % (length, length)).format(name) + suffix
    return name
4452670e28b614249fb184dd78234e52ee241086
709,346
def d_out_dist_cooler(P_mass, rho_dist_cool, w_drift):
    """
    Calculates the tube's diameter of out distillate from distillate cooler
    to distillate volume.

    Parameters
    ----------
    P_mass : float
        The mass flow rate of distillate, [kg/s]
    rho_dist_cool : float
        The density of liquid at cooling temperature, [kg/m**3]
    w_drift : float
        The speed of steam at the tube, [m/s]

    Returns
    -------
    d_out_dist_cooler : float
        The tube's diameter of out distillate from distillate cooler to
        distillate volume, [m]

    References
    ----------
    &&&
    """
    # 0.785 is pi/4; the decimal comma in the original (0,785) was a typo.
    return P_mass / (0.785 * rho_dist_cool * w_drift)
8d6dfb85aa954ef88c821d2ee1d0bb787d409e96
709,347
def has_no_duplicates(input_):
    """Check that a list contains no duplicates.

    For example:
        ['aa', 'bb', 'cc'] is valid.
        ['aa', 'bb', 'aa'] is not valid. The word aa appears more than once.
    """
    return len(input_) == len(set(input_))
6bc1b29b3509e4b17523408ea362591cace8d05d
709,353
def replace_word_choice(sentence: str, old_word: str, new_word: str) -> str:
    """Replace a word in the string with another word.

    :param sentence: str - a sentence to replace words in.
    :param old_word: str - word to replace
    :param new_word: str - replacement word
    :return: str - input sentence with new words in place of old words
    """
    return sentence.replace(old_word, new_word)
27d0eae1aa12538c570fec3aa433d59c40556592
709,358
def test_if_tech_defined(enduse_fueltypes_techs):
    """Test if a technology has been configured,
    i.e. a fuel share has been assigned to one of the
    fueltypes in `fuel_shares`.

    Arguments
    ---------
    enduse_fueltypes_techs : dict
        Configured technologies and fuel shares of an enduse

    Returns
    -------
    c_tech_defined : bool
        Criteria whether technologies have been configured
        for an enduse or not
    """
    c_tech_defined = False
    for fueltype in enduse_fueltypes_techs:
        if enduse_fueltypes_techs[fueltype] == {}:
            pass
        else:
            c_tech_defined = True
            break
    return c_tech_defined
a727b375dc1bc7e76fe63090d8e278013fa2c6bb
709,360
def map_class_to_id(classes):
    """
    Get a 1-indexed id for each class given as an argument

    Note that for MASATI, len(classes) == 1 when only considering boats

    Args:
        classes (list): A list of classes present in the dataset

    Returns:
        dict[str, int]
    """
    class_ids = list(range(1, len(classes) + 1))
    return dict(zip(classes, class_ids))
7c2b47249f61f446327c0a798c1a129c62fde6b3
709,362
def get_output_detections_image_file_path(input_file_path, suffix="--detections"):
    """Get the appropriate output image path for a given image input.

    Effectively appends "--detections" to the original image file and
    places it within the same directory.

    Parameters
    -----------
    input_file_path: str
        Path to input image.
    suffix: str
        Suffix appended to the file.
        Default: "--detections"

    Returns
    -------
    str
        Full path for detections output image.
    """
    input_file_path = input_file_path.replace('--original.', '.')
    input_file_paths = input_file_path.split('.')
    input_file_paths[-2] = input_file_paths[-2] + suffix
    return '.'.join(input_file_paths)
b8d060dff6800750c418c70c61bd4d8e0b7bb416
709,366
def either(a, b):
    """
    :param a: Uncertain value (might be None).
    :param b: Default value.
    :return: Either the uncertain value if it is not None or the default value.
    """
    return b if a is None else a
3fd2f99fa0851dae6d1b5f11b09182dbd29bb8c1
709,371
def get_app_label_and_model_name(path):
    """Gets app_label and model_name from the path given.

    :param str path: Dotted path to the model (without ".model", as stored
        in the Django `ContentType` model.
    :return tuple: app_label, model_name
    """
    parts = path.split('.')
    return (''.join(parts[:-1]), parts[-1])
998e8d81f59491a51f3ae463c76c8627ed63b435
709,372
import math

def vec_len(x):
    """ Length of the 2D vector"""
    length = math.sqrt(x[0]**2 + x[1]**2)
    return length
a357d31df808720eb2c4dfc12f4d6194ef904f67
709,373
def remove_empties(seq):
    """ Remove items of length 0

    >>> remove_empties([1, 2, ('empty', np.nan), 4, 5])
    [1, 2, 4, 5]

    >>> remove_empties([('empty', np.nan)])
    [nan]

    >>> remove_empties([])
    []
    """
    if not seq:
        return seq

    seq2 = [x for x in seq
            if not (isinstance(x, tuple) and x and x[0] == 'empty')]
    if seq2:
        return seq2
    else:
        return [seq[0][1]]
500cbbd942682bfde1b9c1babe9a2190413b07fd
709,375
def read_code_blocks_from_md(md_path):
    """
    Read ```python annotated code blocks from a markdown file.

    Args:
        md_path (str): Path to the markdown file

    Returns:
        py_blocks ([str]): The blocks of python code.
    """
    with open(md_path, "r") as f:
        full_md = f.read()
        md_py_splits = full_md.split("```python")[1:]
        py_blocks = [split.split("```")[0] for split in md_py_splits]
    return py_blocks
ca920f74e9326cf5f3635fbb6ebe125b6d97a349
709,376
def _scale_func(k):
    """
    Return a lambda function that scales its input by k

    Parameters
    ----------
    k : float
        The scaling factor of the returned lambda function

    Returns
    -------
    Lambda function
    """
    return lambda y_values_input: k * y_values_input
65fd06bfb1a278b106eecc4974bc9317b1dea67f
709,379
def split_rows(sentences, column_names):
    """
    Creates a list of sentence where each sentence is a list of lines
    Each line is a dictionary of columns

    :param sentences:
    :param column_names:
    :return:
    """
    new_sentences = []
    root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']
    start = [dict(zip(column_names, root_values))]
    for sentence in sentences:
        rows = sentence.split('\n')
        sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']
        sentence = start + sentence
        new_sentences.append(sentence)
    return new_sentences
444733a9c169bedae8dc0045cd696cafed7085e2
709,383
import re

def fix_reference_name(name, blacklist=None):
    """Return a syntax-valid Python reference name from an arbitrary name"""
    name = "".join(re.split(r'[^0-9a-zA-Z_]', name))
    while name and not re.match(r'([a-zA-Z]+[0-9a-zA-Z_]*)$', name):
        if not re.match(r'[a-zA-Z]', name[0]):
            name = name[1:]
            continue
    name = str(name)
    if not name:
        name = "data"
    if blacklist is not None and name in blacklist:
        get_new_name = lambda index: name + ('_%03d' % index)
        index = 0
        while get_new_name(index) in blacklist:
            index += 1
        name = get_new_name(index)
    return name
2f1a291fc7ac9816bc2620fceeeaf90a1bb3fd4a
709,385
def comp_mass(self):
    """Compute the mass of the Frame

    Parameters
    ----------
    self : Frame
        A Frame object

    Returns
    -------
    Mfra: float
        Mass of the Frame [kg]
    """
    Vfra = self.comp_volume()

    # Mass computation
    return Vfra * self.mat_type.struct.rho
b78ef02f045c1f624b3277ec3e358921b3ea5c02
709,393
def secondsToHMS(intervalInSeconds):
    """converts time in seconds to a string representing time in hours, minutes, and seconds

    :param intervalInSeconds: a time measured in seconds
    :returns: time in HH:MM:SS format
    """
    interval = [0, 0, intervalInSeconds]
    interval[0] = (interval[2] / 3600) - ((interval[2] % 3600) / 3600)
    interval[1] = ((interval[2] % 3600) / 60) - ((interval[2] % 3600) % 60) / 60
    interval[2] = interval[2] % 60
    intervalString = '{0:02.0f}:{1:02.0f}:{2:02.0f}'.format(interval[0], interval[1], interval[2])
    return intervalString
b38d4b886eaabd1361c162b6b7f55e11493dfb60
709,399
def find_layer(model, type, order=0):
    """
    Given a model, find the Nth layer of the specified type.

    :param model: the model that will be searched
    :param type: the lowercase type, as it is automatically saved by keras in the layer's name (e.g. conv2d, dense)
    :param order: 0 by default (the first matching layer will be returned)
    :return: The matching layer or None if it was not found.
    """
    num_found = 0
    for layer in model.layers:
        if type + '_' in layer.get_config()['name']:
            if order == num_found:
                return layer
            num_found += 1
    return None
6d4e08c181900774b9e5666a11df9767f68a10ca
709,400
def _find_weektime(datetime, time_type='min'):
    """
    Finds the minutes/seconds away from midnight between Sunday and Monday.

    Parameters
    ----------
    datetime : datetime
        The date and time that needs to be converted.
    time_type : 'min' or 'sec'
        States whether the time difference should be specified in seconds or minutes.
    """
    if time_type == 'sec':
        return datetime.weekday() * 24 * 60 * 60 + datetime.hour * 60 * 60 + datetime.minute * 60 + datetime.second
    elif time_type == 'min':
        return datetime.weekday() * 24 * 60 + datetime.hour * 60 + datetime.minute
    else:
        raise ValueError("Invalid time type specified.")
2ed28166d239dabdc9f8811812e472810b10c7d7
709,401
def get_samples(select_samples: list, avail_samples: list) -> list:
    """Get while checking the validity of the requested samples

    :param select_samples: The selected samples
    :param avail_samples: The list of all available samples based on the range
    :return: The selected samples, verified
    """
    # Sample number has to be positive
    if True in [_ < 0 for _ in select_samples]:
        raise ValueError(
            "Number of samples with -ns has to be strictly positive!")
    # Sample number has to be within the available sample
    elif False in [_ in avail_samples for _ in select_samples]:
        raise ValueError(
            "Some or all selected samples are not available in the design")
    return select_samples
e1c0c98697d2c504d315064cbdfbad379165d317
709,403
import collections

def _find_stop_area_mode(query_result, ref):
    """ Finds the mode of references for each stop area.

        The query results must have 3 columns: primary key, foreign key
        reference and number of stop points within each area matching that
        reference, in that order.

        :param ref: Name of the reference column.
        :returns: Two lists; one to be used with `bulk_update_mappings` and
        the other strings for invalid areas.
    """
    # Group by stop area and reference
    stop_areas = collections.defaultdict(dict)
    for row in query_result:
        stop_areas[row[0]][row[1]] = row[2]

    # Check each area and find mode matching reference
    update_areas = []
    invalid_areas = {}
    for sa, count in stop_areas.items():
        max_count = [k for k, v in count.items() if v == max(count.values())]
        if len(max_count) == 1:
            update_areas.append({"code": sa, ref: max_count[0]})
        else:
            invalid_areas[sa] = max_count

    return update_areas, invalid_areas
e4677638b272e67d2ae21ee97f71f1f1700fd072
709,404