content (string, lengths 35 to 416k) · sha1 (string, length 40) · id (int64, 0 to 710k)
import re

def parse_directory(filename):
    """
    read html file (nook directory listing),
    return users as [{'name':..., 'username':...},...]
    """
    try:
        file = open(filename)
        html = file.read()
        file.close()
    except:
        return []
    users = []
    for match in re.finditer(r'<b>([^<]+)</b>.*?mailto:([^@]+)@', html):
        groups = match.groups()
        users.append({'name': groups[0], 'username': groups[1]})
    users.sort(key=lambda x: x['username'])
    return users
1b7fc5b6257b5c382f520a60c9227e8b458d482d
961
def countSort(alist):
    """Counting sort"""
    if alist == []:
        return []
    cntLstLen = max(alist) + 1
    cntLst = [0] * cntLstLen
    for i in range(len(alist)):
        cntLst[alist[i]] += 1  # a value alist[i] == k is counted at slot k
    alist.clear()
    for i in range(cntLstLen):
        while cntLst[i] > 0:  # emit each value k once per recorded occurrence
            alist.append(i)
            cntLst[i] -= 1
    return alist
6727b41794dc2a2f826023c2a53202798dfa49ab
962
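A minimal usage sketch for the countSort entry above; the input list is illustrative, not part of the dataset:

data = [4, 2, 2, 8, 3, 3, 1]
print(countSort(data))  # [1, 2, 2, 3, 3, 4, 8] -- sorts in place and returns the list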
from datetime import datetime

def get_default_date_stamp():
    """
    Returns the default date stamp as 'now', as an ISO Format string 'YYYY-MM-DD'
    :return:
    """
    return datetime.now().strftime('%Y-%m-%d')
672cd98265b19da2df92c7849f1059e5988473d7
963
def get_physical_id(r_properties):
    """ Generated resource id """
    bucket = r_properties['Bucket']
    key = r_properties['Key']
    return f's3://{bucket}/{key}'
2cd467d9b1df72a4573d99f7a5d799f9612239c9
965
def _rescale(vector):
    """Scale values in vector to the range [0, 1].

    Args:
        vector: A list of real values.
    """
    # Subtract min, making smallest value 0
    min_val = min(vector)
    vector = [v - min_val for v in vector]
    # Divide by max, making largest value 1
    max_val = float(max(vector))
    try:
        return [v / max_val for v in vector]
    except ZeroDivisionError:
        # All values are the same
        return [1.0] * len(vector)
0091deb65c67ef55b2632ac8d5ff8a15b275d12e
966
def better_get_first_model_each_manufacturer(car_db):
    """Uses map function and lambda to avoid code with side effects."""
    result = map(lambda x: x[0], car_db.values())
    # convert map to list
    return list(result)
8969c23bfe4df2b1c164dca6c4f929a62de5ba2a
968
def parse_study(study):
    """Parse study

    Args:
        study (object): object from DICOMDIR level 1 object (children of patient_record)

    Returns:
        children_object
        appending_keys
    """
    #study_id = study.StudyID
    study_date = study.StudyDate
    study_time = study.StudyTime
    study_des = study.StudyDescription
    return study.children, study_date, study_time, study_des
d0e85d991e4f2f13e6f2bd87c0823858ea9c83bc
969
import pickle

def load_pickle(filename):
    """Load pickle file"""
    with open(filename, 'rb') as filehandler:
        return pickle.load(filehandler)
f93b13616f94c31bc2673232de14b834a8163c5f
970
import functools

def pipe(*functions):
    """
    pipes functions one by one in the provided order
    i.e. applies arg1, then arg2, then arg3, and so on
    if any arg is None, just skips it
    """
    return functools.reduce(
        lambda f, g: lambda x: f(g(x)) if g else f(x),
        functions[::-1],
        lambda x: x) if functions else None
f58afedd5c7fe83edd605b12ca0e468657a78b56
971
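A quick usage sketch for the pipe entry above (illustrative lambdas, not from the dataset); note that None entries are skipped, as the docstring promises:

add_one = lambda x: x + 1
double = lambda x: x * 2
f = pipe(add_one, None, double)  # applies add_one first, then double; None is skipped
print(f(3))  # 8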
def test_lambda_expressions():
    """Lambda expressions"""
    # This function returns the sum of its two arguments: lambda a, b: a+b
    # Like nested function definitions, lambda functions can reference
    # variables from the containing scope.

    def make_increment_function(delta):
        """This example uses a lambda expression to return a function"""
        return lambda number: number + delta

    increment_function = make_increment_function(42)
    assert increment_function(0) == 42
    assert increment_function(1) == 43
    assert increment_function(2) == 44

    # Another use of lambda is to pass a small function as an argument.
    pairs = [(1, 'one'), (2, 'two'), (3, 'three'), (4, 'four')]
    # Sort the pairs by their text key.
    pairs.sort(key=lambda pair: pair[1])
    assert pairs == [(4, 'four'), (1, 'one'), (3, 'three'), (2, 'two')]
e727df25b2165bb0cd7c9cce47700e86d37a2a1a
973
import importlib

def get_rec_attr(obj, attrstr):
    """Get attributes and do so recursively if needed"""
    if attrstr is None:
        return None
    if "." in attrstr:
        attrs = attrstr.split('.', maxsplit=1)
        if hasattr(obj, attrs[0]):
            obj = get_rec_attr(getattr(obj, attrs[0]), attrs[1])
        else:
            try:
                obj = get_rec_attr(importlib.import_module(obj.__name__ + "." + attrs[0]), attrs[1])
            except ImportError:
                raise
    else:
        if hasattr(obj, attrstr):
            obj = getattr(obj, attrstr)
    return obj
a6831d48c79b8c58542032385a5c56373fd45321
977
def _get_message_mapping(types: dict) -> dict:
    """
    Return a mapping with the type as key, and the index number.

    :param types: a dictionary of types with the type name, and the message type
    :type types: dict
    :return: message mapping
    :rtype: dict
    """
    message_mapping = {}
    entry_index = 2  # based on the links found, they normally start with 2?
    for _type, message in types.items():
        message_mapping[_type] = entry_index
        entry_index += 1
    return message_mapping
a098e0386aa92c41d4d404154b0b2a87ce9365ce
978
import itertools

def combinations():
    """Produce all the combinations for different items."""
    combined = itertools.combinations('ABC', r=2)
    combined = [''.join(possibility) for possibility in combined]
    return combined
501060cf9c7de9b4b4453940e017ad30cec2f84f
980
import os

def _get_credentials(vcap_services, service_name=None):
    """Retrieves the credentials of the VCAP Service of the specified `service_name`.
    If `service_name` is not specified, it takes the information from the
    STREAMING_ANALYTICS_SERVICE_NAME environment variable.

    Args:
        vcap_services (dict): A dict representation of the VCAP Services information.
        service_name (str): One of the service names stored in `vcap_services`

    Returns:
        dict: A dict representation of the credentials.

    Raises:
        ValueError: Cannot find `service_name` in `vcap_services`
    """
    service_name = service_name or os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME', None)
    # Get the service corresponding to the SERVICE_NAME
    services = vcap_services['streaming-analytics']
    creds = None
    for service in services:
        if service['name'] == service_name:
            creds = service['credentials']
            break
    # If no corresponding service is found, error
    if creds is None:
        raise ValueError("Streaming Analytics service " + str(service_name) + " was not found in VCAP_SERVICES")
    return creds
a72f3e7b6be56ab6c66252cd8063fd0207aac02b
981
def is_str_str_dict(x):
    """Tests if something is a str:str dictionary"""
    return isinstance(x, dict) and all(
        isinstance(k, str) and isinstance(v, str) for k, v in x.items()
    )
ce6230714c0526764f2cc67e4dedf598acd28169
982
def _ensureListLike(item):
    """
    Return the item if it is a list or tuple, otherwise add it to a list
    and return that.
    """
    return item if (isinstance(item, list) or isinstance(item, tuple)) \
        else [item]
1c602a1fcf8dd6a5b4583264e63e38747f5b0d50
983
import io

def get_file_from_gitlab(gitpkg, path, ref="master"):
    """Retrieves a file from a Gitlab repository, returns a (StringIO) file."""
    return io.StringIO(gitpkg.files.get(file_path=path, ref=ref).decode())
7eccad01a538bdd99651b0792aff150f73e82cdd
984
def mocked_requests_get(*args, **kwargs):
    """Mock requests.get invocations."""
    class MockResponse:
        """Class to represent a mocked response."""

        def __init__(self, json_data, status_code):
            """Initialize the mock response class."""
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            """Return the json of the response."""
            return self.json_data

    if str(args[0]).startswith('https://api.ring.com/clients_api/session'):
        return MockResponse({
            "profile": {
                "authentication_token": "12345678910",
                "email": "foo@bar.org",
                "features": {
                    "chime_dnd_enabled": False,
                    "chime_pro_enabled": True,
                    "delete_all_enabled": True,
                    "delete_all_settings_enabled": False,
                    "device_health_alerts_enabled": True,
                    "floodlight_cam_enabled": True,
                    "live_view_settings_enabled": True,
                    "lpd_enabled": True,
                    "lpd_motion_announcement_enabled": False,
                    "multiple_calls_enabled": True,
                    "multiple_delete_enabled": True,
                    "nw_enabled": True,
                    "nw_larger_area_enabled": False,
                    "nw_user_activated": False,
                    "owner_proactive_snoozing_enabled": True,
                    "power_cable_enabled": False,
                    "proactive_snoozing_enabled": False,
                    "reactive_snoozing_enabled": False,
                    "remote_logging_format_storing": False,
                    "remote_logging_level": 1,
                    "ringplus_enabled": True,
                    "starred_events_enabled": True,
                    "stickupcam_setup_enabled": True,
                    "subscriptions_enabled": True,
                    "ujet_enabled": False,
                    "video_search_enabled": False,
                    "vod_enabled": False},
                "first_name": "Home",
                "id": 999999,
                "last_name": "Assistant"}
        }, 201)
    elif str(args[0])\
            .startswith("https://api.ring.com/clients_api/ring_devices"):
        return MockResponse({
            "authorized_doorbots": [],
            "chimes": [
                {
                    "address": "123 Main St",
                    "alerts": {"connection": "online"},
                    "description": "Downstairs",
                    "device_id": "abcdef123",
                    "do_not_disturb": {"seconds_left": 0},
                    "features": {"ringtones_enabled": True},
                    "firmware_version": "1.2.3",
                    "id": 999999,
                    "kind": "chime",
                    "latitude": 12.000000,
                    "longitude": -70.12345,
                    "owned": True,
                    "owner": {
                        "email": "foo@bar.org",
                        "first_name": "Marcelo",
                        "id": 999999,
                        "last_name": "Assistant"},
                    "settings": {
                        "ding_audio_id": None,
                        "ding_audio_user_id": None,
                        "motion_audio_id": None,
                        "motion_audio_user_id": None,
                        "volume": 2},
                    "time_zone": "America/New_York"}],
            "doorbots": [
                {
                    "address": "123 Main St",
                    "alerts": {"connection": "online"},
                    "battery_life": 4081,
                    "description": "Front Door",
                    "device_id": "aacdef123",
                    "external_connection": False,
                    "features": {
                        "advanced_motion_enabled": False,
                        "motion_message_enabled": False,
                        "motions_enabled": True,
                        "people_only_enabled": False,
                        "shadow_correction_enabled": False,
                        "show_recordings": True},
                    "firmware_version": "1.4.26",
                    "id": 987652,
                    "kind": "lpd_v1",
                    "latitude": 12.000000,
                    "longitude": -70.12345,
                    "motion_snooze": None,
                    "owned": True,
                    "owner": {
                        "email": "foo@bar.org",
                        "first_name": "Home",
                        "id": 999999,
                        "last_name": "Assistant"},
                    "settings": {
                        "chime_settings": {
                            "duration": 3,
                            "enable": True,
                            "type": 0},
                        "doorbell_volume": 1,
                        "enable_vod": True,
                        "live_view_preset_profile": "highest",
                        "live_view_presets": [
                            "low", "middle", "high", "highest"],
                        "motion_announcement": False,
                        "motion_snooze_preset_profile": "low",
                        "motion_snooze_presets": [
                            "none", "low", "medium", "high"]},
                    "subscribed": True,
                    "subscribed_motions": True,
                    "time_zone": "America/New_York"}]
        }, 200)
    elif str(args[0]).startswith("https://api.ring.com/clients_api/doorbots"):
        return MockResponse([{
            "answered": False,
            "created_at": "2017-03-05T15:03:40.000Z",
            "events": [],
            "favorite": False,
            "id": 987654321,
            "kind": "motion",
            "recording": {"status": "ready"},
            "snapshot_url": ""
        }], 200)
41a54452593cd23e8ea86f1fbdc0c5e92845482f
985
def count_disordered(arr, size):
    """Counts the number of items that are out of the expected
    order (monotonous increase) in the given list."""
    counter = 0
    state = {
        "expected": next(item for item in range(size) if item in arr),
        "checked": []
    }

    def advance_state():
        state["expected"] += 1
        while True:
            in_arr = state["expected"] in arr
            is_overflow = state["expected"] > size
            not_checked = state["expected"] not in state["checked"]
            if not_checked and (in_arr or is_overflow):
                return
            state["expected"] += 1

    for val in arr:
        if val == state["expected"]:
            advance_state()
        else:
            counter += 1
        state["checked"].append(val)
    return counter
bb708e7d862ea55e81207cd7ee85e634675b3992
986
import json

def json_io_dump(filename, data):
    """Dumps the already formatted JSON data to filename and returns True

    :arg filename <string> - Filename of json to point to
    :arg data - The already formatted data to dump to JSON
    """
    with open(filename, encoding='utf-8', mode='w') as json_file:
        json.dump(data, json_file)
    return True
e0ae7187ac29669330109ae39ebcac33c1e30ab6
987
import requests

def reload_rules(testcase, rest_url):
    """
    :param TestCase testcase: TestCase object
    :param str rest_url: http://host:port
    :rtype: dict
    """
    resp = requests.get(rest_url + "/rest/reload").json()
    print("Reload rules response: {}".format(resp))
    testcase.assertEqual(resp.get("success"), True)
    return resp
e747668ba8ad5f58f0307194b0008469dd3593c1
988
def subset_sum(arr, target_sum, i, cache):
    """
    Returns whether any subset (not contiguous) of the array has sum equal to target sum.
    """
    if target_sum == 0:
        return True, {}
    if i < 0:
        return False, {}
    if target_sum in cache[i]:
        return cache[i][target_sum]
    # Either include this element or not!
    sub_ans, sub_ans_indices = subset_sum(arr, target_sum, i - 1, cache)
    if not sub_ans and target_sum >= arr[i]:
        sub_ans, sub_ans_indices = subset_sum(arr, target_sum - arr[i], i - 1, cache)
        sub_ans_indices = set(sub_ans_indices)
        sub_ans_indices.add(i)
    if not sub_ans:
        sub_ans_indices = {}
    cache[i][target_sum] = sub_ans, sub_ans_indices
    return cache[i][target_sum]
aa90d7eb4ffa3a457a5f27733de56a82df450861
989
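A usage sketch for the subset_sum entry above; the caller seeds one memo dict per index, and the sample array is illustrative:

arr = [3, 34, 4, 12, 5, 2]
cache = [{} for _ in arr]
found, indices = subset_sum(arr, 9, len(arr) - 1, cache)
print(found, sorted(indices))  # True [2, 4] -- arr[2] + arr[4] == 4 + 5 == 9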
def every(n_steps):
    """Returns True every n_steps, for use as *_at functions in various places."""
    return lambda step: step % n_steps == 0
02fc6bc59fa6f223b681539baeae32c40bd9577e
991
def calc_batch_size(num_examples, batches_per_loop, batch_size):
    """Reduce the batch size if needed to cover all examples without a remainder."""
    assert batch_size > 0
    assert num_examples % batches_per_loop == 0
    while num_examples % (batch_size * batches_per_loop) != 0:
        batch_size -= 1
    return batch_size
3c394813a98a8414645f633a519001937247e8b0
992
def has_admin_access(user):
    """Check if a user has admin access."""
    return user == 'admin'
d178861bee504f6f3026c9e495d56cc8d2d7c3d3
993
def flatten_mock_calls(mock):
    """
    Flatten the calls performed on a particular mock object,
    into a list of calls with arguments.
    """
    result = []
    for call in mock.mock_calls:
        call = list(call)
        call_name = call[0]
        if '.' in str(call_name):
            call_name = str(call_name).split('.')[-1]
        result.append([call_name] + call[1:])
    return result
7c41025382f4ca25db1ccd328e9eb17e1d72a01a
995
def update_not_existing_kwargs(to_update, update_from):
    """
    This function updates the keyword arguments from update_from in to_update,
    only if the keys are not set in to_update.

    This is used for updating kwargs from the default dicts.
    """
    if to_update is None:
        to_update = {}
    to_update.update({k: v for k, v in update_from.items() if k not in to_update})
    return to_update
a66de151e6bc6d8f5b2f1b0ff32e30d2c8cb5277
996
def linear_forward(A, W, b):
    """Returns Z, (A, W, b)"""
    Z = (W @ A) + b
    cache = (A, W, b)
    return Z, cache
41d223473d2d8f084f13ca0f90f483b66e479a04
997
def piecewise_accel(duration, initial, final):
    """Defines a piecewise acceleration.

    Args:
        duration (float): Length of time for the acceleration to complete.
        initial (float): Initial value.
        final (float): Final value.
    """
    a = (final - initial)
    return lambda t: initial + a * (
        (9. / 2 * t**3 / duration**3) * (t < duration / 3)
        + (-9 * t**3 / duration**3 + 27. / 2 * t**2 / duration**2 - 9. / 2 * t / duration + 1. / 2) * (t < 2 * duration / 3) * (t >= duration / 3)
        + (9. / 2 * t**3 / duration**3 - 27. / 2 * t**2 / duration**2 + 27. / 2 * t / duration - 7. / 2) * (t >= 2 * duration / 3))
7f6acd7ba2610a2e56cc1f0758b3a39543bfe8c2
999
def palindrome(d: int) -> str:
    """
    Function is getting the digits of the number, left shifting it by
    multiplying it with 10 at each iteration and adding the previous result.

    Input: Integer
    Output: String (Sentence telling if the number is palindrome or not)
    """
    remainder = 0
    revnum = 0
    n = len(str(d))
    copynum2 = d
    while copynum2 != 0:
        remainder = copynum2 % 10
        revnum = revnum * 10 + remainder
        copynum2 //= 10
    if d == revnum:
        return "Given Number {} is palindrome".format(d)
    else:
        return "Given Number {} is not palindrome".format(d)
fe654ab92a905e265987856bcd2106c7b082b490
1,000
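A usage sketch for the palindrome entry above, with illustrative inputs:

print(palindrome(121))  # Given Number 121 is palindrome
print(palindrome(123))  # Given Number 123 is not palindrome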
from typing import List

def add_multiple_package(package_list: List[str]) -> str:
    """
    Generate latex code to add multiple packages to the preamble

    :param package_list: List of packages to add in preamble
    """
    usepackage_command_list = []
    for package in package_list:
        usepackage_command_list.append(rf"""\usepackage{{{package}}}""")
    return "\n".join(usepackage_command_list)
90bdd0a521c094d92c35ef92e62d6b43f6b135b4
1,002
import sys

def get_next_method(generator_instance):
    """
    Cross-platform function that retrieves the 'next' method from a generator instance.

    :type generator_instance: Any
    :rtype: () -> Any
    """
    if sys.version_info > (3, 0):
        return generator_instance.__next__
    else:
        return generator_instance.next
115bdd13c5afc74d1d5204d004c1034ae6438cb1
1,003
def commit_veto(environ, status, headers):
    """Veto a commit.

    This hook is called by repoze.tm in case we want to veto a commit
    for some reason. Return True to force a rollback.

    By default we veto if the response's status code is an error code.
    Override this method, or monkey patch the instancemethod, to fine
    tune this behaviour.
    """
    return not 200 <= int(status.split(None, 1)[0]) < 400
9fc96fe8cdbedde20cb325e189b71d9df94cf176
1,004
def apply_to_all(func, results, datasets):
    """Apply the given function to all results

    Args:
        func: the function to apply
        results: nested dictionary where the nested levels are:
            algorithm name, sensitive attribute and split ID
        datasets: nested dictionary where the nested levels are:
            sensitive attribute and split ID

    Returns:
        a nested dictionary with the same structure as `results` that contains
        the output of the given function
    """
    output = {}
    for algo in results:
        output[algo] = {}
        for sensitive in results[algo]:
            output[algo][sensitive] = {}
            for split_id in results[algo][sensitive]:
                output[algo][sensitive][split_id] = func(
                    results[algo][sensitive][split_id],
                    datasets[sensitive][split_id])
    return output
6ea085b3541a84ac97f63389ba83c3a06d5e0b85
1,005
def any_value_except(mapping, excluded_keys):
    """Return a random value from a dict that is not associated with
    excluded_key. Raises StopIteration if there are no other keys than
    excluded_key"""
    return next(mapping[key] for key in mapping if key not in excluded_keys)
8d633713b93cfd1f0324d5c4a56a18fa7931ff06
1,006
import torch

def one_hot(y, num_dim=10):
    """
    One Hot Encoding, similar to `torch.eye(num_dim).index_select(dim=0, index=y)`

    :param y: N-dim tensor
    :param num_dim: do one-hot labeling from `0` to `num_dim-1`
    :return: shape = (batch_size, num_dim)
    """
    one_hot_y = torch.zeros(y.size(0), num_dim)
    if y.is_cuda:
        one_hot_y = one_hot_y.cuda()
    return one_hot_y.scatter_(1, y.view(-1, 1), 1.)
694bfea18ecbb5c5737e0d38c0aa0f5f52a82a55
1,007
import os

def getProgFromFile(f):
    """Get program name from __file__.
    """
    if f.endswith(".py"):
        f = f[:-3]
    return os.path.basename(f)
474c9b3f2bef2117daf8456d8b6f026d738182a1
1,009
def heuristic(node_1, node_2):
    """
    Heuristic when only 4 directions are possible (Manhattan)
    """
    (x_node_1, y_node_1) = node_1
    (x_node_2, y_node_2) = node_2
    return abs(x_node_1 - x_node_2) + abs(y_node_1 - y_node_2)
e431ed9d8a7acb34604b3e83c3f3d7774cd27d51
1,011
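A worked example for the Manhattan heuristic above; the two grid nodes are illustrative:

print(heuristic((0, 0), (2, 3)))  # |0-2| + |0-3| == 5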
def extend_dict(x, *y):
    """Similar to Object.assign() / _.extend() in Javascript, using
    'dict.update()'

    Args:
        x (dict): the base dict to merge into with 'update()'
        *y (dict, iter): any number of dictionary or iterable key/value
            pairs to be sequentially merged into 'x'. Skipped if None.
    """
    z = x.copy()
    for d in [d for d in y if d is not None]:
        z.update(d)
    return z
f10a5bc7d5ed3646e6a9f8f9535a16bd800c7fcd
1,012
def print_hdr(soup, hdr, file=None):
    """
    :param soup: [bs4.BeautifulSoup] document context
    :param hdr: [dict] header node to process
    :param file: [stream] I/O stream to print to
    :return: [stream] pass on the I/O stream so descent continues
    """
    tag = hdr['tag']
    tag_id = tag['id']
    indent = (hdr['level'] - 1) * '  '
    # do this replacement for (relative) readability
    content_tags = ["<%s>" % (h.name) if h.name else h.string for h in hdr['content']]
    print("%s%s - %s %s" % (indent, tag.name, tag_id, content_tags), file=file)
    return file
2c6fd613a5c6ddb5ec842fb7cee845d1a8771ccd
1,014
def make_general_csv_rows(general_csv_dict):
    """
    Method for making a list of metrics from the general metrics dict.
    Rows are used in the general metrics writer.

    :param general_csv_dict: dict with all metrics
    :type general_csv_dict: dict
    :return: all metrics as rows
    :rtype: list
    """
    rows = []
    for key, value in general_csv_dict.items():
        row = [key[0], key[1]]
        row.extend(value)
        rows.append(row)
    return rows
45ca165d312b39cd0b7088e0bcbfb402a92e7e2b
1,015
def get_speakable_timestamp(timestamp):
    """Return a 'speakable' timestamp, e.g. 8am, noon, 9pm, etc."""
    speakable = f"{timestamp.strftime('%I').lstrip('0')} {timestamp.strftime('%p')}"
    if speakable == '12 PM':
        return 'noon'
    elif speakable == '12 AM':
        return 'midnight'
    return speakable
0b724686ebd5d3152d9017dc456d2945c78be0ee
1,016
import torch

def _featurize(inputs, model):
    """
    Helper function used to featurize exemplars before feeding into buffer.
    """
    with torch.no_grad():
        # Forward pass to featurize the raw exemplars
        outputs = model(*inputs).detach()
    return outputs
191fd1b362f38309a35618284fcf3f1910a06bd6
1,017
import pandas as pd
import numpy as np

def rm_standard_dev(var, window):
    """
    Smoothed standard deviation
    """
    print('\n\n-----------STARTED: Rolling std!\n\n')
    rollingstd = np.empty((var.shape))
    for ens in range(var.shape[0]):
        for i in range(var.shape[2]):
            for j in range(var.shape[3]):
                series = pd.Series(var[ens, :, i, j])
                rollingstd[ens, :, i, j] = series.rolling(window).std().to_numpy()
    newdata = rollingstd[:, window:, :, :]
    print('-----------COMPLETED: Rolling std!\n\n')
    return newdata
d37cfa3c756f8fc062a28ac078e4e16557282951
1,018
def make_file_prefix(run, component_name):
    """
    Compose the run number and component name into string prefix
    to use with filenames.
    """
    return "{}_{}".format(component_name, run)
73ef37d75d9e187ee49ee058958c3b8701185585
1,022
def parse_resolution(resolution):
    """
    return: width, height, resolution
    """
    resolution = resolution.strip()
    splits = resolution.split(',')
    return int(splits[0]), int(splits[1]), int(splits[2])
de937e440c4540d11cedd868e3f4a046baa99f22
1,023
import argparse

def _parser() -> argparse.Namespace:
    """Take care of all the argparse stuff.

    :returns: the args
    """
    # parser = GooeyParser(description='Remove : from data files')
    parser = argparse.ArgumentParser(description='Combines Nods using ')
    parser.add_argument('listspectra', help='List of spectra to combine.', default=False)
    parser.add_argument('-o', "--optimal-nods", help="Optimal nod bool matrix file.")
    parser.add_argument("-s", "--spectralcoords", default=False, action="store_true",
                        help="Turn spectra into spectral coordinates first before adding. Default=False")
    parser.add_argument("-n", "--nod_num", help="Number of nods in the nod cycle, default=8",
                        default=8, type=int)
    parser.add_argument("-c", "--combination", help="Nod combination method, default=all means do all three.",
                        default="all", choices=["all", "optimal", "non-opt", "mix"])
    parser.add_argument("-u", "--unnorm", help="Combine the un-normalized nods.",
                        action="store_true")
    parser.add_argument("--snr", help="Show snr of continuum.", action="store_true")
    parser.add_argument("-p", "--plot", help="Show the plots.", action="store_true")
    parser.add_argument("--output_verify", help="Fits file verification mode", default="fix+warn")
    parser.add_argument("-r", "--overwrite", help="Overwrite output file if already exists",
                        action="store_true")
    args = parser.parse_args()
    return args
3edcefc24898d15fd67925729590710a4f0d1fb5
1,024
import base64

def multibase_b64decode(data):
    """
    Follow forge's base64 urlsafe encode convention to decode string

    Args:
        data(string): encoded string

    Returns:
        bytes

    Examples:
        >>> multibase_b64decode('aGVsbG8')
        b'hello'
    """
    if isinstance(data, str):
        data = data.encode()
    return base64.urlsafe_b64decode(
        (data + b'=' * (-len(data) % 4)))
fdbc0f937e33d7994737a3a515973598cac3debd
1,025
def filter_words(w_map, emb_array, ck_filenames):
    """
    delete word in w_map but not in the current corpus
    """
    vocab = set()
    for filename in ck_filenames:
        for line in open(filename, 'r'):
            if not (line.isspace() or (len(line) > 10 and line[0:10] == '-DOCSTART-')):
                line = line.rstrip('\n').split()
                assert len(line) >= 3, 'wrong ck file format'
                word = line[0]
                vocab.add(word)
                word = word.lower()
                vocab.add(word)
    new_w_map = {}
    new_emb_array = []
    for (word, idx) in w_map.items():
        if word in vocab or word in ['<unk>', '<s>', '< >', '<\n>']:
            assert word not in new_w_map
            new_w_map[word] = len(new_emb_array)
            new_emb_array.append(emb_array[idx])
    print('filtered %d --> %d' % (len(emb_array), len(new_emb_array)))
    return new_w_map, new_emb_array
efdef92093acf25c992dba86da25a4118ba728ec
1,028
def get_cache_template(sources, grids, geopackage, table_name="tiles"):
    """
    Returns the cache template which is "controlled" settings for the application.
    The intent is to allow the user to configure certain things but impose specific behavior.

    :param sources: A name for the source
    :param grids: specific grid for the data source
    :param geopackage: Location for the geopackage
    :return: The dict template
    """
    if sources == ["None"]:
        sources = []
    return {
        "sources": sources,
        "cache": {"type": "geopackage", "filename": str(geopackage), "table_name": table_name},
        "grids": [grid for grid in grids if grid == "default"] or grids,
        "format": "mixed",
        "request_format": "image/png",
    }
dc83a155d28e0b39f12a7dc7142b61a4bf27512b
1,029
def _row_key(row):
    """
    :param row: a normalized row from STATEMENT_METRICS_QUERY
    :return: a tuple uniquely identifying this row
    """
    return row['database_name'], row['user_name'], row['query_signature'], row['query_hash'], row['query_plan_hash']
2984e0e0b5fcc4e51a26af188e51fe65c52077a2
1,031
import hashlib

def sha1_file(filename):
    """
    Return the hex string representation of the SHA1 checksum of the filename
    """
    s = hashlib.sha1()
    with open(filename, "rb") as f:
        for line in f:
            s.update(line)
    return s.hexdigest()
b993ac9f025d69124962905f87b1968617bb33f5
1,032
def read_from_file(file_path):
    """
    Read a file and return a list with all the lines in the file
    """
    file_in_list = []
    with open(file_path, 'r') as f:
        for line in f.readlines():
            file_in_list.append(line)
    return file_in_list
5fef3a3f50528c1a9786451666ae7e43be282bf9
1,033
def get_object_from_path(path):
    """
    :param path: dot separated path. Assumes last item is the object
        and first part is module path(str)

    example:
        cls = get_object_from_path("a.module.somewhere.MyClass")
        you can create a path like this:
        class_path = "{0}.{1}".format(MyClass.__module__, MyClass.__name__)
    """
    module_path, _, obj_name = path.rpartition(".")
    module = __import__(module_path, globals(), locals(), [obj_name], -1)
    obj = getattr(module, obj_name, None)
    return obj
e722b040486288d53fe4a357d81ddec8dfc9820e
1,034
def audio(src: str) -> str:
    """
    Insert audio tag

    The tag is currently not supported by Nuance, please use `audio_player` kit:
    docs/use_kits_and_actions.md

    :param src:
    :return:
    """
    return f'<audio src="{src}"/>'
f9396d5f82eeca27089de41187fd7d5e967cc9cf
1,037
import platform

def is_windows_system():
    """
    | ##Function purpose: determine whether the system is Windows
    | ##Parameter description: True or False
    | ##Return value:
    | ##Function logic:
    | ##Developer: jhuang
    | ##Date:
    """
    return 'Windows' in platform.system()
6bfe296188b9dccf8338f0b2bbaaf146d9b22243
1,040
import sqlite3

def cn(DB):
    """Return the cursor and connection object."""
    conn = sqlite3.connect(DB)
    c = conn.cursor()
    return (c, conn)
76abbec283d45732213f8b94031242146cdb4ee0
1,043
import copy

def partially_matched_crossover(random, mom, dad, args):
    """Return the offspring of partially matched crossover on the candidates.

    This function performs partially matched crossover (PMX). This type of
    crossover assumes that candidates are composed of discrete values that
    are permutations of a given set (typically integers). It produces offspring
    that are themselves permutations of the set.

    .. Arguments:
       random -- the random number generator object
       mom -- the first parent candidate
       dad -- the second parent candidate
       args -- a dictionary of keyword arguments

    Optional keyword arguments in args:

    - *crossover_rate* -- the rate at which crossover is performed (default 1.0)
    """
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    if random.random() < crossover_rate:
        size = len(mom)
        points = random.sample(range(size), 2)
        x, y = min(points), max(points)
        bro = copy.copy(dad)
        bro[x:y+1] = mom[x:y+1]
        sis = copy.copy(mom)
        sis[x:y+1] = dad[x:y+1]
        for parent, child in zip([dad, mom], [bro, sis]):
            for i in range(x, y+1):
                if parent[i] not in child[x:y+1]:
                    spot = i
                    while x <= spot <= y:
                        spot = parent.index(child[spot])
                    child[spot] = parent[i]
        return [bro, sis]
    else:
        return [mom, dad]
b0d5132cf4ca14095f3d7c637cb50db3fe37d244
1,044
import re

def regex_trim(input, regex, replace=''):
    """
    Trims or replaces the regex match in an input string.

    input (string): the input string to search for matches
    regex (string): regex to match
    replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
    """
    return re.sub(regex, replace, input)
169bfaa0d2bfd7a1f32c1e05a63b41993f82bf4b
1,045
def analytic_pi(x, c, w, h):
    """Analytic response function for an even pair of Lorentz distributions.

    Correspond to

    .. math::

        \\Pi(x) = \\int_{-\\infty}^{\\infty}
        \\frac{\\omega^2}{\\omega^2 + x^2}\\sigma(\\omega)\\,d\\omega

    where :math:`\\sigma(\\omega)` is :func:`~even_lorentzian`.

    Args:
        x (array): matsubara at which the response function is evaluated
        c (float): Center of the distribution (+ or -)
        w (float): Width of the distribution (variance)
        h (float): Height/weight of the distribution (area under the curve)

    Returns:
        array: Values of the integral at imaginary `x`
    """
    return 2*h*c/(c**2+(x+w)**2)
fc622e79a6692105c15e05ea353ba925b8378831
1,047
def prepare_definitions(defs, prefix=None):
    """
    prepares definitions from a dictionary

    With a provided dictionary of definitions in key-value pairs, builds them
    into a definition list. For example, if a dictionary contains a key ``foo``
    with a value ``bar``, the returned definitions will be a list with the
    values ``['foo=bar']``. If a key contains a value of ``None``, the key will
    be ignored and will not be part of the final definition list. If a
    ``prefix`` value is provided, each definition entry will be prefixed with
    the provided value.

    Args:
        defs: the arguments to process
        prefix (optional): prefix value to prefix each definition

    Returns:
        list of arguments
    """
    final = []
    if defs:
        for key, val in defs.items():
            if val is None:
                continue
            if prefix:
                key = prefix + key
            if val:
                final.append('{}={}'.format(key, val))
            else:
                final.append(key)
    return final
ddc6d14cc18f8afba766efee65ab365df1d226c2
1,048
def extract_job_url(job):
    """
    parse the job data and extract the str for the URL of the job posted

    params:
        job str: html str representation from bs4

    returns:
        url str: relative URL path of the job ad
    """
    return job.a["href"]
7517badcc2814e641c04a8f880353d897d434b7f
1,049
def create_patric_boolean_dict(genome_dict, all_ECs):
    """
    Create new dict of dicts to store genome names

    :param genome_dict: dict of key=genome_id, value=dict of genome's name, id, ec_numbers
    :param all_ECs: set of all ECs found across all genomes
    """
    ## new format: key=genome, value={EC:0 or 1}
    ## This makes it easy to write to file with pandas
    boolean_genome_dict = {}
    for genome_id in genome_dict:
        boolean_genome_dict[genome_id] = {}
        boolean_genome_dict[genome_id]['genome_name'] = genome_dict[genome_id]['genome_name']
        boolean_genome_dict[genome_id]['genome_name_with_id'] = genome_dict[genome_id]['genome_name_with_id']
        boolean_genome_dict[genome_id]['duplicate'] = genome_dict[genome_id]['duplicate']
        for EC in all_ECs:
            if EC in genome_dict[genome_id]['ECs']:
                boolean_genome_dict[genome_id][EC] = 1
            else:
                boolean_genome_dict[genome_id][EC] = 0
    return boolean_genome_dict
7ab3554bbf705ee8ce99d1d99ff453b06e3d2b53
1,050
def append_ast_if_req(field):
    """ Adds a new filter to template tags for use in templates.
    Used by writing {{ field | append_ast_if_req }}

    @register registers the filter into the django template library so it can
    be used in templates.

    :param Form.field field: a field of a form that you would like to return
        the label and potentially an asterisk for.
    :returns: The field label and, if it's a required field, an asterisk
    :rtype: string
    """
    if field.field.required:
        return field.label + '*'
    else:
        return field.label
76e36ead3387729b0536bf84f288c400f376a041
1,051
def getPileupMixingModules(process):
    """
    Method returns two lists:
    1) list of mixing modules ("MixingModule")
    2) list of data mixing modules ("DataMixingModules")
    The first gets added only pileup files of type "mc", the
    second pileup files of type "data".
    """
    mixModules, dataMixModules = [], []
    prodsAndFilters = {}
    prodsAndFilters.update(process.producers)
    prodsAndFilters.update(process.filters)
    for key, value in prodsAndFilters.items():
        if value.type_() in ["MixingModule", "DataMixingModule", "PreMixingModule"]:
            mixModules.append(value)
        if value.type_() == "DataMixingModule":
            dataMixModules.append(value)
    return mixModules, dataMixModules
4ee3cc5f7b11e4ad6a846f14dc99e4f82bd04905
1,052
import random

def check_random_state(seed):
    """
    Turn seed into a random.Random instance

    If seed is None, return the Random singleton used by random.
    If seed is an int, return a new Random instance seeded with seed.
    If seed is already a Random instance, return it.
    Otherwise raise ValueError.
    """
    # Code slightly adjusted from scikit-learn utils/validation.py
    if seed is None or isinstance(seed, int):
        rng = random.Random(seed)
    elif isinstance(seed, random.Random):
        rng = seed
    else:
        raise ValueError(
            "### error: '{}' cannot be used to seed random.Random instance.".format(seed)
        )
    return rng
347481de01f4a3bba59bc9a2c484c10d4857e1e2
1,055
def chunk(seq, size, groupByList=True):
    """Returns list of lists/tuples broken up by size input"""
    func = tuple
    if groupByList:
        func = list
    return [func(seq[i:i + size]) for i in range(0, len(seq), size)]
e7cece99822a01476b46351cebc1345793485cbd
1,056
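A usage sketch for the chunk entry above, with an illustrative sequence:

print(chunk([1, 2, 3, 4, 5], 2))                     # [[1, 2], [3, 4], [5]]
print(chunk([1, 2, 3, 4, 5], 2, groupByList=False))  # [(1, 2), (3, 4), (5,)]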
def definition():
    """
    Lists the parent-child relationships through the curriculum structure.
    """
    sql = """
        --Course to session
        SELECT c.course_id as parent_id,
            CASE WHEN cc.course_id IS NULL THEN 0 ELSE 1 END as linked,
            cs.course_session_id as child_id, 'course' as parent,
            cs.description + ' ' + cast(cs.session as char(1)) as description,
            -1 as ratio, 0 as changed
        FROM c_course c
        LEFT OUTER JOIN c_course_session cs on cs.curriculum_id = c.curriculum_id
        LEFT OUTER JOIN c_course_config cc on c.course_id = cc.course_id
            AND cc.course_session_id = cs.course_session_id
        UNION ALL
        --session to group
        SELECT a.course_session_id as parent_id,
            CASE WHEN c.course_session_id IS NULL THEN 0 ELSE 1 END as linked,
            b.cgroup_id as child_id, 'course_session' as parent,
            b.description, -1 as ratio, 0 as changed
        FROM c_course_session a
        LEFT OUTER JOIN c_cgroup b ON a.curriculum_id = b.curriculum_id
        LEFT OUTER JOIN c_course_session_config c on a.course_session_id = c.course_session_id
            AND b.cgroup_id = c.cgroup_id
        UNION ALL
        --CGroup to component
        SELECT a.cgroup_id as parent_id,
            CASE WHEN c.component_id IS NULL THEN 0 ELSE 1 END as linked,
            b.component_id as child_id, 'cgroup' as parent,
            b.description, ISNULL(c.ratio, 1) as ratio, 0 as changed
        FROM c_cgroup a
        LEFT OUTER JOIN c_component b ON a.curriculum_id = b.curriculum_id
        LEFT OUTER JOIN c_cgroup_config c on a.cgroup_id = c.cgroup_id
            AND b.component_id = c.component_id
    """
    return sql
e8dc6a720dcd5f62854ce95e708a88b43859e2cc
1,057
def bilin(x, y, data, datax, datay):  # --DC
    """ x, y ARE COORDS OF INTEREST
    data IS 2x2 ARRAY CONTAINING NEARBY DATA
    datax, datay CONTAINS x & y COORDS OF NEARBY DATA"""
    lavg = ((y - datay[0]) * data[1, 0] + (datay[1] - y) * data[0, 0]) / (datay[1] - datay[0])
    ravg = ((y - datay[0]) * data[1, 1] + (datay[1] - y) * data[0, 1]) / (datay[1] - datay[0])
    return ((x - datax[0]) * ravg + (datax[1] - x) * lavg) / (datax[1] - datax[0])
59a740f65c7187a08cdc09cef8aa100b01c652cf
1,058
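A worked example for the bilin entry above; since the function indexes data with data[i, j], a 2x2 numpy array is assumed, and the grid values are illustrative:

import numpy as np
grid = np.array([[10.0, 20.0],
                 [30.0, 40.0]])  # rows follow datay, columns follow datax
print(bilin(0.5, 0.5, grid, [0.0, 1.0], [0.0, 1.0]))  # 25.0, the average of the four corners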
def biosample_table_data():
    """Return a dictionary containing the expected values of the BioSample Table"""
    columns = [
        "id",
        "BioSample_id",
        "BioSampleAccession",
        "BioSampleAccessionSecondary",
        "BioSampleBioProjectAccession",
        "BioSampleSRAAccession",
        "BioSampleOrganism",
        "BioSampleStrain",
        "BioSampleSubmissionDate",
        "BioSampleComment",
    ]
    metadata = [
        "1",
        "12991206",
        "SAMN12991206",
        "",
        "",
        "SRS5502739",
        "TestOrganism1",
        "TestStrain1",
        "2019-10-08T07:15:03.950",
        "",
    ]
    table_dict = {}
    # Populate the dict with data
    for i in range(0, len(columns)):
        key = columns[i]
        value = metadata[i]
        table_dict[key] = value
    return table_dict
65e5d5bb5416a8f113100562fba8f2e6fd66796a
1,059
def get_user_playlists(spotipy_obj, username):
    """Gets and returns all Spotify playlists owned by the username specified.

    Parameters:
        spotipy_obj: Spotipy object
        username: Spotify username

    Returns:
        List of dictionaries, each dictionary a Spotify playlist object.
    """
    # Grab all user playlists, including private ones
    initial_playlists = spotipy_obj.user_playlists(username)
    final_playlists = []
    while initial_playlists:
        for playlist in initial_playlists["items"]:
            if playlist["owner"]["id"] == username:
                final_playlists.append(playlist)
        if initial_playlists["next"]:
            initial_playlists = spotipy_obj.next(initial_playlists)
        else:
            initial_playlists = None
    return final_playlists
90c06e0ddd91a7a84f4d905dd9334f9b4c27f890
1,060
def compute_median_survival_time(times, surv_function):
    """
    Computes a median survival time estimate by looking for where the survival
    function crosses 1/2.

    Parameters
    ----------
    times : 1D numpy array
        Sorted list of unique times (in ascending order).

    surv_function : 1D numpy array
        A survival function evaluated at each of time in `times`, in the same
        order.

    Returns
    -------
    output : float
        Median survival time estimate.
    """
    t_left = times[0]
    t_right = times[-1]
    if surv_function[-1] > 1/2:
        # survival function never crosses 1/2; just output this last time point
        return t_right
    for t, s in zip(times, surv_function):
        if s >= 0.5:
            t_left = t
    for t, s in zip(reversed(times), reversed(surv_function)):
        if s <= 0.5:
            t_right = t
    return (t_left + t_right) / 2.
22103bc705acb791c0937a403aa9c34e9145e1c2
1,063
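A usage sketch for the median-survival entry above; plain lists work here, and the toy survival curve is illustrative:

times = [1.0, 2.0, 3.0, 4.0]
surv = [0.9, 0.6, 0.4, 0.1]
# last time with S >= 0.5 is 2.0, first time with S <= 0.5 is 3.0
print(compute_median_survival_time(times, surv))  # 2.5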
def aggregate_ant(data, sub_num, response_type="full"):
    """
    Aggregate data from the ANT task.

    Calculates various summary statistics for the ANT task for a given subject.

    Parameters
    ----------
    data : dataframe
        Pandas dataframe containing a single subject's trial data for the task.
    sub_num : str
        Subject number to which the data file belongs.
    response_type : {'full', 'correct', 'incorrect'}, optional
        Should the summary data be calculated using all trials? Only correct
        trials? Or only incorrect trials? This is not supported in all tasks.

    Returns
    -------
    stats : list
        List containing the calculated data for the subject.
    """
    # Calculate times following errors and correct responses
    df = data
    follow_error_rt = df.loc[df.correct.shift() == 0, "RT"].mean()
    follow_correct_rt = df.loc[df.correct.shift() == 1, "RT"].mean()

    if response_type == "correct":
        df = data[data["correct"] == 1]
    elif response_type == "incorrect":
        df = data[data["correct"] == 0]
    elif response_type == "full":
        df = data

    # Aggregated descriptives

    ## congruency conditions
    grouped_congruency = df.groupby("congruency")
    neutral_rt = grouped_congruency.mean().get_value("neutral", "RT")
    congruent_rt = grouped_congruency.mean().get_value("congruent", "RT")
    incongruent_rt = grouped_congruency.mean().get_value("incongruent", "RT")
    neutral_rtsd = grouped_congruency.std().get_value("neutral", "RT")
    congruent_rtsd = grouped_congruency.std().get_value("congruent", "RT")
    incongruent_rtsd = grouped_congruency.std().get_value("incongruent", "RT")
    neutral_rtcov = neutral_rtsd / neutral_rt
    congruent_rtcov = congruent_rtsd / congruent_rt
    incongruent_rtcov = incongruent_rtsd / incongruent_rt
    neutral_correct = grouped_congruency.sum().get_value("neutral", "correct")
    congruent_correct = grouped_congruency.sum().get_value("congruent", "correct")
    incongruent_correct = grouped_congruency.sum().get_value("incongruent", "correct")

    ## cue conditions
    grouped_cue = df.groupby("cue")
    nocue_rt = grouped_cue.mean().get_value("nocue", "RT")
    center_rt = grouped_cue.mean().get_value("center", "RT")
    spatial_rt = grouped_cue.mean().get_value("spatial", "RT")
    double_rt = grouped_cue.mean().get_value("double", "RT")
    nocue_rtsd = grouped_cue.std().get_value("nocue", "RT")
    center_rtsd = grouped_cue.std().get_value("center", "RT")
    spatial_rtsd = grouped_cue.std().get_value("spatial", "RT")
    double_rtsd = grouped_cue.std().get_value("double", "RT")
    nocue_rtcov = nocue_rtsd / nocue_rt
    center_rtcov = center_rtsd / center_rt
    spatial_rtcov = spatial_rtsd / spatial_rt
    double_rtcov = double_rtsd / double_rt
    nocue_correct = grouped_cue.sum().get_value("nocue", "correct")
    center_correct = grouped_cue.sum().get_value("center", "correct")
    spatial_correct = grouped_cue.sum().get_value("spatial", "correct")
    double_correct = grouped_cue.sum().get_value("double", "correct")

    # OLS regression
    conflict_intercept, conflict_slope = congruent_rt, incongruent_rt - congruent_rt
    conflict_slope_norm = conflict_slope / congruent_rt
    alerting_intercept, alerting_slope = double_rt, nocue_rt - double_rt
    alerting_slope_norm = alerting_slope / double_rt
    orienting_intercept, orienting_slope = spatial_rt, center_rt - spatial_rt
    orienting_slope_norm = orienting_slope / spatial_rt

    return [
        sub_num,
        follow_error_rt, follow_correct_rt,
        neutral_rt, congruent_rt, incongruent_rt,
        neutral_rtsd, congruent_rtsd, incongruent_rtsd,
        neutral_rtcov, congruent_rtcov, incongruent_rtcov,
        neutral_correct, congruent_correct, incongruent_correct,
        nocue_rt, center_rt, spatial_rt, double_rt,
        nocue_rtsd, center_rtsd, spatial_rtsd, double_rtsd,
        nocue_rtcov, center_rtcov, spatial_rtcov, double_rtcov,
        nocue_correct, center_correct, spatial_correct, double_correct,
        conflict_intercept, conflict_slope, conflict_slope_norm,
        alerting_intercept, alerting_slope, alerting_slope_norm,
        orienting_intercept, orienting_slope, orienting_slope_norm,
    ]
be01651d450560a5c36bc6240025fe59352d6347
1,064
import re

def url_validate(url):
    """
    URL validation, used for the URL passed along on login
    """
    regex = r'^\?next=((/\w+)*)'
    if isinstance(url, str) and re.match(regex, url):
        return url.split('?next=')[-1]
    return '/'
7a5aa5866018d1bf16c0f4ede527a770da760e17
1,065
def _build_tags(model_uri, model_python_version=None, user_tags=None):
    """
    :param model_uri: URI to the MLflow model.
    :param model_python_version: The version of Python that was used to train the
                                 model, if the model was trained in Python.
    :param user_tags: A collection of user-specified tags to append to the set of
                      default tags.
    """
    tags = dict(user_tags) if user_tags is not None else {}
    tags["model_uri"] = model_uri
    if model_python_version is not None:
        tags["python_version"] = model_python_version
    return tags
8807967b3e9d89dbb7a24542d2709bc9293992df
1,067
from typing import Tuple

def unsorted_array(arr: list) -> Tuple[list, int, Tuple[int, int]]:
    """
    Time Complexity: O(n)
    """
    start, end = 0, len(arr) - 1
    while start < end and arr[start] < arr[start + 1]:
        start += 1
    while start < end and arr[end] > arr[end - 1]:
        end -= 1
    for el in arr[start : end + 1]:
        # another way of implementing this part would be to find the min and
        # max of the subarray and keep on decrementing start/incrementing end
        while el < arr[start]:
            start -= 1
        while el > arr[end]:
            end += 1
    if start + 1 < end - 1:
        return arr[start + 1 : end], end - start - 1, (start + 1, end - 1)
    return [], 0, (-1, -1)
c3370a3e76009ef26ae3e1086e773463c312c6bb
1,068
def get_tol_values(places):
    # type: (float) -> list
    """List of tolerances to test

    Returns:
        list[tuple[float, float]] -- [(abs_tol, rel_tol)]
    """
    abs_tol = 1.1 / pow(10, places)
    return [(None, None), (abs_tol, None)]
5af82438abbc0889374d62181ca7f0b7ee3c0fbe
1,069
def ipv4_size_check(ipv4_long):
    """size check of an ipv4 decimal

    Args:
        ipv4_long (int): ipv4 decimal

    Returns:
        bool: valid: True
    """
    if type(ipv4_long) is not int:
        return False
    elif 0 <= ipv4_long <= 4294967295:
        return True
    else:
        return False
97c5d5c7472fb81e280f91275b5a88b032ee7927
1,070
def display_ordinal_value(glyph: str):
    """Displays the integer value of the given glyph

    Examples:
        >>> display_ordinal_value('🐍')
        128013
        >>> display_ordinal_value('G')
        71
        >>> display_ordinal_value('g')
        103
    """
    return ord(glyph)
7daa53180023bfec2968308d463ac615a83a4e55
1,072
def _without_command(results):
    """A helper to tune up results so that they lack 'command'
    which is guaranteed to differ between different cmd types
    """
    out = []
    for r in results:
        r = r.copy()
        r.pop('command')
        out.append(r)
    return out
67927cf56884e0e3b22d0daf37e6c02eaef3849b
1,073
def print_tree(tree, level=0, current=False):
    """Pretty-print a dictionary configuration `tree`"""
    pre = ' ' * level
    msg = ''
    for k, v in tree.items():
        if k == 'self':
            msg += print_tree(v, level)
            continue
        # Detect subdevice
        if isinstance(v, dict) and 'self' in v:
            msg += pre + '|++> ' + k + '\n'
            msg += print_tree(v, level + 1)
            continue
        if not current:
            continue
        v = repr(v['current'])
        if len(v) > 50:
            v = v[:46] + ' ...'
        msg += '{}|: {} = {}\n'.format(pre, k, v)
    return msg
f9697b506e9254b4982a037bdfbeb8a1d27f35bb
1,077
def chaine_polynome(poly):
    """Return the string representation of the polynomial _poly_ (simple version)"""
    tab_str = [str(coef) + "*X^" + str(i) if i != 0 else str(coef)
               for i, coef in enumerate(poly)]
    return " + ".join(tab_str[::-1])
79fd59afe84c1bd12e3417b9195514664d1bce20
1,078
import re

def tokens(s):
    """Return a list of strings containing individual words from string s.

    This function splits on whitespace transitions, and captures apostrophes
    (for contractions).

    >>> tokens("I'm fine, how are you?")
    ["I'm", 'fine', 'how', 'are', 'you']
    """
    words = re.findall(r"\b[\w']+\b", s)
    return words
aee0b6fad2f9107c893496f1f3807e80c9d2e44b
1,079
def median(list_in):
    """
    Calculates the median of the data

    :param list_in: A list
    :return: float
    """
    list_in.sort()
    half = int(len(list_in) / 2)
    if len(list_in) % 2 != 0:
        return float(list_in[half])
    elif len(list_in) % 2 == 0:
        value = (list_in[half - 1] + list_in[half]) / 2
        return float(value)
261487551098b80986cbfb8e4cd28279649ac456
1,080
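A usage sketch for the median entry above (illustrative lists; note the function sorts its argument in place):

print(median([3, 1, 2]))     # 2.0
print(median([4, 1, 3, 2]))  # 2.5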
from typing import Dict
from typing import List
from typing import Set
from typing import Any
from functools import reduce

def get_set_from_dict_from_dict(
    instance: Dict[str, Dict[str, List]], field: str
) -> Set[Any]:
    """
    Format of template field within payload

    Function gets field from instance-dict, which is a dict again.
    The values of these dicts have to be joined in a set.
    """
    cml = instance.get(field)
    if cml:
        return reduce(lambda i1, i2: i1 | i2, [set(values) for values in cml.values()])
    else:
        return set()
75ee6f4d46a4f57012e76b0f02fb20f629b6bf60
1,082
def Get_Unread_Messages(service, userId):
    """Retrieves all unread messages with attachments, returns list of message ids.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
            can be used to indicate the authenticated user.
    """
    message_list = []
    message_ids = service.users().messages().list(userId=userId, labelIds='INBOX',
                                                  alt="json", q='is:unread has:attachment').execute()
    if message_ids['resultSizeEstimate'] > 0:
        for message in message_ids['messages']:
            message_list.append(message['id'])
    return message_list
2aa28ff1aa093754bd293a831be2dada0e629801
1,083
def potatoes(p0, w0, p1):
    """
    - p1/100 = water1 / (water1 + (1 - p0/100) * w0)
      => water1 = w0 * p1/100 * (1 - p0/100) / (1 - p1/100)
    - dry = w0 * (1 - p0/100)
    - w1 = water1 + dry = w0 * (100 - p0) / (100 - p1)

    Example:
        98/100 = water1 / (water1 + (1 - 99/100) * 100)
        water1 = 49
        w1 = 49 + 1 = 50
    """
    w1 = w0 * (100 - p0) / (100 - p1)
    return int(w1)
f2955a58db3a48c64b6acc4980e663f33332aeea
1,084
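A worked check of the potatoes entry above, reproducing the arithmetic in the docstring: 100 kg at 99% water drops to 98% water, so the 1 kg of dry mass must become 2% of the new weight:

print(potatoes(99, 100, 98))  # 50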
import re

def get_sale(this_line, cattle, category):
    """Convert the input into a dictionary, with keys matching
    the CSV column headers in the scrape_util module.
    """
    cattle = cattle.replace("MARKET", "")
    cattle = cattle.replace(":", "")
    cattle = cattle.strip().title()
    sale = {'cattle_cattle': cattle}
    if bool(re.search("TOWN", str(category))):
        for idx, title in enumerate(category):
            if title == "TOWN":
                sale['consignor_city'] = this_line[idx].strip().title()
            if title == "HEAD":
                head = this_line[idx]
                if '-' in head:
                    head = head.split('-')[0]
                if '/' in head:
                    head = head.split('/')[0]
                sale['cattle_head'] = head
            if title == "KIND":
                cattle = cattle + ' ' + this_line[idx].title()
                sale['cattle_cattle'] = cattle
            if title == "WEIGHT":
                sale['cattle_avg_weight'] = this_line[idx].replace(",", "")
            if title == "PRICE":
                price = this_line[idx].replace("$", "")
                price = price.replace(",", "")
                if bool(re.search("Pairs", cattle)):
                    sale['cattle_price'] = price
                else:
                    sale['cattle_price_cwt'] = price
    else:
        sale = {}
    sale = {k: v.strip() for k, v in sale.items() if v}
    return sale
f75e949558c9938a44f64ccce11bacce8d116e9f
1,085
import os
import requests

def get_request(term, destination, days_input, price_limit, food_preference):
    """
    Fetches restaurant information from the Yelp API for a given meal term,
    meal attribute, destination, number of days of vacation, price limit,
    and food preference.

    Params:
        term (str) the specific meal, like "breakfast"
        destination (str) the requested destination, like "New York"
        days_input (int) the number of days of the vacation, like 3
        price_limit (list) the requested list of prices to search going up to
            the price limit, like [1,2,3] (for $$$)
        food_preference (str) the requested food cuisine preferences, like
            "American, Chinese"

    Example:
        breakfast_list, lunch_list, dinner_list = get_request(term="breakfast",
            destination="New York", days_input=3, price_limit=[1,2,3],
            food_preference="American, Chinese")

    Returns the request for a specific meal through "meal_response".
    """
    # ACQUIRE API KEY
    API_KEY = os.environ.get("YELP_API_KEY")

    # Endpoint and headers using API Key
    link_endpoint = 'https://api.yelp.com/v3/businesses/search'
    link_headers = {'Authorization': 'bearer %s' % API_KEY}

    # Read in the inputted parameters for a given meal
    meal_parameters = {'term': term,
                       'limit': days_input,  # 1 breakfast per vacation day
                       'offset': 50,  # basically lets you do pages
                       'price': price_limit,  # can change this later
                       'radius': 10000,  # Change later?
                       'categories': food_preference,
                       'location': destination,
                       'attributes': "good_for_" + term,
                       }

    # Make a request to the Yelp API using the correct parameters
    meal_response = requests.get(url=link_endpoint, params=meal_parameters,
                                 headers=link_headers)
    print(meal_response)

    # Return the request
    return meal_response
df9b5a2534278963dc5fa0719db3f915ce8fcb8d
1,086
def relacao(lista):
    """Create a function that receives a list of real numbers and returns
    another list of size 3 in which (i) the first element is the count of
    numbers greater than zero, (ii) the second element is the count of
    numbers less than zero and (iii) the last element is the count of zeros
    in the initial list.

    Args:
        lista (list): list received to be processed by the function

    Returns:
        list: list of size three in the order (greater, less and equal to zero)
    """
    maior = menor = igual = 0
    for i in lista:
        if i > 0:
            maior += 1
        elif i < 0:
            menor += 1
        else:
            igual += 1
    return [maior, menor, igual]
39e45d8221d5d5b7322ebec5aa3f761d9e2ef413
1,087
def fit_slice(fitter, sliceid, lbda_range=[5000, 8000], nslices=5, **kwargs):
    """ """
    fitvalues = fitter.fit_slice(lbda_ranges=lbda_range, metaslices=nslices,
                                 sliceid=sliceid, **kwargs)
    return fitvalues
2d2b4b91b0ba3b0dca908d56e8b5184e5ae36b9e
1,088
def german_weekday_name(date):
    """Return the german weekday name for a given date."""
    days = [u'Montag', u'Dienstag', u'Mittwoch', u'Donnerstag', u'Freitag', u'Samstag', u'Sonntag']
    return days[date.weekday()]
7d2919c61438ec913abe38cccd924bb69f866655
1,089
def identity_func(x):
    """The identity (a.k.a. transparent) function that returns its input as is."""
    return x
06e0296c338d68663aa87d08b21f84919be3f85e
1,090
def make_choice_validator(
        choices, default_key=None, normalizer=None):
    """
    Returns a callable that accepts the choices provided.

    Choices should be provided as a list of 2-tuples, where the first
    element is a string that should match user input (the key); the second
    being the value associated with the key.

    The callable by default will match, upon complete match the first
    value associated with the result will be returned. Partial matches
    are supported.

    If a default is provided, that value will be returned if the user
    provided input is empty, i.e. the value that is mapped to the empty
    string.

    Finally, a normalizer function can be passed. This normalizes all
    keys and validation value.
    """

    def normalize_all(_choices):
        # normalize all the keys for easier comparison
        if normalizer:
            _choices = [(normalizer(key), value) for key, value in choices]
        return _choices

    choices = normalize_all(choices)

    def choice_validator(value):
        if normalizer:
            value = normalizer(value)
        if not value and default_key:
            value = choices[default_key][0]
        results = []
        for choice, mapped in choices:
            if value == choice:
                return mapped
            if choice.startswith(value):
                results.append((choice, mapped))
        if len(results) == 1:
            return results[0][1]
        elif not results:
            raise ValueError('Invalid choice.')
        else:
            raise ValueError(
                'Choice ambiguous between (%s)' % ', '.join(
                    k for k, v in normalize_all(results))
            )

    return choice_validator
65ac672f16a1031a9051bc4f6769c6b1b88db727
1,091
def gen_event_type_entry_str(event_type_name, event_type, event_config):
    """
    return string like:
    {"cpu-cycles", PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES},
    """
    return '{"%s", %s, %s},\n' % (event_type_name, event_type, event_config)
ca89c19b45f182b8a7ae74ab76f3f42bddf46811
1,092
from pathlib import Path

def get_force_charge() -> str:
    """
    Gets the command object for the force charge command

    Returns:
        The command object as a json string
    """
    force_charge = Path('force_charge.json').read_text()
    return force_charge
c67277c62664419c3b4a19ae57ea6de027c60416
1,093
def xor_columns(col, parity):
    """ XOR a column with the parity values from the state """
    result = []
    for i in range(len(col)):
        result.append(col[i] ^ parity[i])
    return result
2eff4dbf3edf2b97410e7bef17c043a30b1f3aa8
1,094
def _non_string_elements(x):
    """
    Simple helper to check that all values of x are string. Returns all
    non string elements as (position, element).

    :param x: Iterable
    :return: [(int, !String), ...]
    """
    problems = []
    for i in range(0, len(x)):
        if not isinstance(x[i], str):
            problems.append((i, x[i]))
    return problems
974715622949157693084823a52a88973b51d100
1,095
def config_entry_version_fixture():
    """Define a config entry version fixture."""
    return 2
cac78c1f02668c95ce918d6219cadd5f08ab21c9
1,096
def get_filename_pair(filename):
    """
    Given the name of a VASF data file (*.rsd) or parameter file (*.rsp)
    return a tuple of (parameters_filename, data_filename). It doesn't
    matter if the filename is a fully qualified path or not.

    - assumes extensions are all caps or all lower
    """
    param_filename = data_filename = filename[:-1]
    if filename[-1:].isupper():
        data_filename += 'D'
        param_filename += 'P'
    else:
        data_filename += 'd'
        param_filename += 'p'
    return (param_filename, data_filename)
f6eb5a64cf472f230c5806447d9c2ee8ae43a71d
1,097