content: string (lengths 35 to 416k)
sha1: string (length 40)
id: int64 (0 to 710k)
def _checker(word: dict):
    """Check whether the 'word' dictionary is well formed.

    :param word: the node in the list of the text
    :type word: dict
    :return: True if "f", "ref" and "sig" are all in word, else False
    :rtype: bool
    """
    return "f" in word and "ref" in word and "sig" in word
ee6ec5a7ee393ddcbc97b13f6c09cdd9019fb1a6
709,680
def classified_unread_counts():
    """
    Unread counts returned by the helper.classify_unread_counts function.
    """
    return {
        'all_msg': 12,
        'all_pms': 8,
        'unread_topics': {
            (1000, 'Some general unread topic'): 3,
            (99, 'Some private unread topic'): 1
        },
        'unread_pms': {
            1: 2,
            2: 1,
        },
        'unread_huddles': {
            frozenset({1001, 11, 12}): 3,
            frozenset({1001, 11, 12, 13}): 2
        },
        'streams': {
            1000: 3,
            99: 1
        }
    }
4d5e984641de88fd497b6c78891b7e6478bb8385
709,681
def to_n_class(digit_lst, data, labels):
    """Make a subset of the MNIST dataset containing only particular digits.

    Parameters
    ----------
    digit_lst : list
        for example, [0, 1, 2] or [1, 5, 8]
    data : numpy.array, shape (n_samples, n_features)
    labels : numpy.array or list of str

    Returns
    -------
    numpy.array, list of int
    """
    if not set(digit_lst) <= set(range(10)):
        raise ValueError("digit_lst must contain only digits in the range 0-9")
    indices = []
    new_labels = []
    for i, x in enumerate(data):
        for digit in digit_lst:
            if labels[i] == str(digit):
                indices.append(i)
                new_labels.append(digit)
    return data[indices], new_labels
79652687ec0670ec00d67681711903ae01f4cc87
709,682
def format_value_with_percentage(original_value):
    """
    Return a value in percentage format from an input argument,
    the original value
    """
    percentage_value = "{0:.2%}".format(original_value)
    return percentage_value
78bfb753b974bc7cbe3ac96f58ee49251063d2e7
709,684
import numpy


def get_Z_and_extent(topofile):
    """Get data from an ESRI ASCII file."""
    with open(topofile, "r") as f:
        ncols = int(f.readline().split()[1])
        nrows = int(f.readline().split()[1])
        xllcorner = float(f.readline().split()[1])
        yllcorner = float(f.readline().split()[1])
        cellsize = float(f.readline().split()[1])
        nodatavalue = float(f.readline().split()[1])

        data = numpy.zeros((nrows, ncols), dtype=numpy.float64)
        for i in range(nrows):
            data[i, :] = f.readline().strip().split()

    extent = [xllcorner, xllcorner + ncols * cellsize,
              yllcorner, yllcorner + nrows * cellsize]

    return data, extent
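A minimal usage sketch, assuming a tiny hand-written ESRI ASCII grid (the demo file contents below are hypothetical, not from any real dataset):

import tempfile

demo = (
    "ncols 2\n"
    "nrows 2\n"
    "xllcorner 0.0\n"
    "yllcorner 0.0\n"
    "cellsize 1.0\n"
    "NODATA_value -9999\n"
    "1 2\n"
    "3 4\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".asc", delete=False) as tmp:
    tmp.write(demo)
Z, extent = get_Z_and_extent(tmp.name)
print(Z)       # [[1. 2.] [3. 4.]]
print(extent)  # [0.0, 2.0, 0.0, 2.0]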
e96db5c2ae4a0d6c94654d7ad29598c3231ec186
709,685
import torch


def train_discrim(discrim, state_features, actions, optim, demostrations, settings):
    """demostrations: [state_features|actions]"""
    criterion = torch.nn.BCELoss()

    for _ in range(settings.VDB_UPDATE_NUM):
        learner = discrim(torch.cat([state_features, actions], dim=-1))
        expert = discrim(demostrations)

        discrim_loss = criterion(learner, torch.ones([len(state_features), 1])) + \
            criterion(expert, torch.zeros(len(demostrations), 1))

        optim.zero_grad()
        discrim_loss.backward()
        optim.step()

    expert_acc = ((discrim(demostrations) < 0.5).float()).mean()
    learner_acc = ((discrim(torch.cat([state_features, actions], dim=1)) > 0.5).float()).mean()

    return expert_acc, learner_acc
7e6c16fc396b371e92d3a04179eacb9cae63659c
709,686
def max_tb(collection):  # pragma: no cover
    """Returns the maximum number of TB recorded in the collection"""
    max_TB = 0
    for doc in collection.find({}).sort([('total_TB', -1)]).limit(1):
        max_TB = doc['total_TB']
    return max_TB
bde417de0b38de7a7b5e4e3db8c05e87fa6c55ca
709,687
import time


def datetime_to_timestamp(d):
    """Convert a datetime object to seconds since Epoch.

    Args:
        d: a naive datetime object in default timezone

    Return:
        int, timestamp in seconds
    """
    return int(time.mktime(d.timetuple()))
356ac090b0827d49e9929a7ef26041b26c6cc690
709,688
def decomposePath(path):
    """
    :example:
    >>> decomposePath(None)
    >>> decomposePath("")
    >>> decomposePath(1)
    >>> decomposePath("truc")
    ('', 'truc', '', 'truc')
    >>> decomposePath("truc.txt")
    ('', 'truc', 'txt', 'truc.txt')
    >>> decomposePath("/home/truc.txt")
    ('/home/', 'truc', 'txt', 'truc.txt')
    >>> decomposePath("/home/truc.txt.bz2")
    ('/home/', 'truc.txt', 'bz2', 'truc.txt.bz2')
    >>> decomposePath("/truc.txt.bz2")
    ('/', 'truc.txt', 'bz2', 'truc.txt.bz2')
    >>> decomposePath("./truc.txt.bz2")
    ('./', 'truc.txt', 'bz2', 'truc.txt.bz2')
    >>> decomposePath(".truc.txt.bz2")
    ('', '.truc.txt', 'bz2', '.truc.txt.bz2')
    """
    if path is None or not isinstance(path, str) or len(path) == 0:
        return None
    filenameExt = path.split("/")[-1]
    dirname = path[0:-len(filenameExt)]
    filename = ".".join(filenameExt.split(".")[0:-1])
    ext = filenameExt.split(".")[-1]
    if len(filename) == 0 and len(ext) > 0:
        filename, ext = ext, filename
    return (dirname, filename, ext, filenameExt)
7b45cfe64f631912fc56246f404ddbea51b9f1ec
709,689
def update_coverage(coverage, path, func, line, status):
    """Add to coverage the coverage status of a single line"""
    coverage[path] = coverage.get(path, {})
    coverage[path][func] = coverage[path].get(func, {})
    coverage[path][func][line] = coverage[path][func].get(line, status)
    coverage[path][func][line] = coverage[path][func][line].combine(status)
    return coverage
46e5a1e5c4ebba3a9483f90ada96a0f7f94d8c1d
709,690
def cross_product(v1, v2):
    """Calculate the cross product of 2 vectors as (x1 * y2 - x2 * y1)."""
    return v1.x * v2.y - v2.x * v1.y
871d803ef687bf80facf036549b4b2062f713994
709,691
def divisors(num):
    """
    Takes a number and returns all divisors of the number, ordered least to greatest

    :param num: int
    :return: list (int)
    """
    return [i for i in range(1, num + 1) if num % i == 0]
f15169b2672847294a219207f6022ad3e49338d2
709,692
def compare_distance(tree, target):
    """
    Checks tree edit distance. Since every node has a unique position, we know
    that the node is the same when the positions are the same. Hence, a simple
    method of counting the number of edits one needs to do to create the
    target tree out of a given tree is equal to the number of positional
    differences.
    """
    # check for positional overlap
    edit_value = 0
    for node in target:
        node.found = False
    for node in tree:
        same_node = False
        for t_node in target:
            if node.pos[0] == t_node.pos[0] and node.pos[1] == t_node.pos[1]:
                same_node = True
                t_node.found = True
        if not same_node:
            edit_value += 1
    # count found
    for node in target:
        if not node.found:
            edit_value += 1
    return edit_value
96b57e88b8e70dbb43231b56cbe7e9b7ebcfd10f
709,693
def header(name='peptide'):
    """
    Parameters
    ----------
    name

    Returns
    -------
    """
    with open('{}.pdb'.format(name), 'r') as f:
        file = f.read()
    model = file.find('\nMODEL')
    atom = file.find('\nATOM')
    if atom < 0:
        raise ValueError('no ATOM entries found in PDB')
    if model < 0:
        index = atom
    else:
        index = min(model, atom)
    return file[:index] + '\n'
84e75e34771b7c395ee36611c8d055ca1fdf67dc
709,694
def str2bytes(seq):
    """
    Converts a string to a list of integers
    """
    return list(map(ord, str(seq)))
7afe8e40cd4133c59be673b537f2717591b093cf
709,695
import os


def getsize(filename):
    """Return the size of a file, reported by os.stat()."""
    return os.stat(filename).st_size
f21bd048bf1fdbc80cdcbd4f14dba8f390439f74
709,697
def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of
    residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # work with diff dim tensors, not just 2D ConvNets
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = input.new_empty(shape).bernoulli_(keep_prob)
    if keep_prob > 0.0 and scale_by_keep:
        random_tensor.div_(keep_prob)
    return input * random_tensor
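A quick behavioral sketch, assuming PyTorch is available: with drop_prob=0.5 roughly half the samples in the batch are zeroed and the survivors are rescaled by 1/keep_prob.

import torch

torch.manual_seed(0)
x = torch.ones(8, 3, 4, 4)  # (batch, channels, H, W)
out = drop_path(x, drop_prob=0.5, training=True)
# each sample is either all zeros or all 2.0 (= 1 / keep_prob)
print(out.view(8, -1)[:, 0])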
289ae545fa184bb459275685d3a2894e5219db2e
709,698
import os


def envi_header(inputpath):
    """
    Convert an envi binary/header path to a header, handling extensions
    Args:
        inputpath: path to envi binary file
    Returns:
        str: the header file associated with the input reference.
    """
    if os.path.splitext(inputpath)[-1] in ('.img', '.dat', '.raw'):
        # headers could be at either filename.img.hdr or filename.hdr.  Check
        # both, return the one that exists if it does, if not return the
        # latter (new file creation presumed).
        hdrfile = os.path.splitext(inputpath)[0] + '.hdr'
        if os.path.isfile(hdrfile):
            return hdrfile
        elif os.path.isfile(inputpath + '.hdr'):
            return inputpath + '.hdr'
        return hdrfile
    elif os.path.splitext(inputpath)[-1] == '.hdr':
        return inputpath
    else:
        return inputpath + '.hdr'
45df7507017676648cd4fae955da26916bbf4738
709,700
import socket


def basic_checks(server, port):
    """Perform basic checks on given host"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # 2 seconds timeout
    sock.settimeout(2)
    return sock.connect_ex((server, int(port))) == 0
4a31521089feb2c178bb5202fa818804dfe87142
709,703
def ordToString(ordList):
    """Convert a list of ord values to a list of strings, splitting on commas (ord 44)."""
    newStrList = []
    cstr = ""
    for cint in ordList:
        cstr += chr(cint)
        if cint == 44:
            newStrList.append(cstr[:-1])
            cstr = ""
    return newStrList
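A small sketch of the comma-splitting behavior (104/105 are 'h'/'i', 121/111 are 'y'/'o', 44 is ','). Note that trailing characters after the final comma are dropped, since the buffer is only flushed when a comma is seen.

# "hi,yo," as ord values; each comma (44) terminates one string
print(ordToString([104, 105, 44, 121, 111, 44]))  # ['hi', 'yo']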
5a836f7fe34803744de90aa2608e3d99a081c7ff
709,704
import re


def _xfsdump_output(data):
    """
    Parse CLI output of the xfsdump utility.
    """
    out = {}
    summary = []
    summary_block = False

    for line in [l.strip() for l in data.split("\n") if l.strip()]:
        line = re.sub("^xfsdump: ", "", line)
        if line.startswith("session id:"):
            out["Session ID"] = line.split(" ")[-1]
        elif line.startswith("session label:"):
            out["Session label"] = re.sub("^session label: ", "", line)
        elif line.startswith("media file size"):
            out["Media size"] = re.sub(r"^media file size\s+", "", line)
        elif line.startswith("dump complete:"):
            out["Dump complete"] = re.sub(r"^dump complete:\s+", "", line)
        elif line.startswith("Dump Status:"):
            out["Status"] = re.sub(r"^Dump Status:\s+", "", line)
        elif line.startswith("Dump Summary:"):
            summary_block = True
            continue

        if line.startswith(" ") and summary_block:
            summary.append(line.strip())
        elif not line.startswith(" ") and summary_block:
            summary_block = False

    if summary:
        out["Summary"] = " ".join(summary)

    return out
dbc7fbf9dced99b83a7dc5917c473a1dee16d749
709,705
def csstext(text: str, cls: str, span: bool = False, header: bool = False) -> str:
    """
    Custom build HTML text element.
    """
    if span:
        tag = 'span'
    elif header:
        tag = 'h1'
    else:
        tag = 'p'
    return f'<{tag} class="{cls}">{str(text)}</{tag}>'
0833fd9d83143e09b5c234e193a8e53ef653112b
709,706
def trans_exam_list_to_colum(example_list, headers=None):
    """
    Convert a list of examples into a column-oriented form, used to adapt the
    output of additional information.

    :param example_list: list of examples
    :param headers: the attributes needed; defaults to ("question", "answer", "yes_or_no")
    :return: {header1: [...], header2: [...], ...}
    """
    if headers is None:
        headers = ("question", "answer", "yes_or_no")
    result = {}
    for header in headers:
        result[header] = []
    for example in example_list:
        for header in headers:
            result[header].append(getattr(example, header, ""))
    return result
ff5a2e5f6e27ce0a32717e55ba35dbd864a11dbb
709,707
def parseParams(opt):
    """Parse a set of name=value parameters in the input value.
    Return list of (name, value) pairs.
    Raise ValueError if a parameter is badly formatted.
    """
    params = []
    for nameval in opt:
        try:
            name, val = nameval.split("=")
        except ValueError:
            raise ValueError("Bad name=value format for '%s'" % nameval)
        params.append((name, val))
    return params
b932f74c8e5502ebdd7a8749c2de4b30921d518b
709,708
def _get_name(dist):
    """Attempts to get a distribution's short name, excluding the name scope."""
    return getattr(dist, 'parameters', {}).get('name', dist.name)
fd57e523c1a84a36f9ed56236e4b8db1e887575c
709,709
def GenerateConfig(context):
    """Generates configuration."""
    key_ring = {
        'name': 'keyRing',
        'type': 'gcp-types/cloudkms-v1:projects.locations.keyRings',
        'properties': {
            'parent': 'projects/' + context.env['project'] + '/locations/' + context.properties['region'],
            'keyRingId': context.env['deployment'] + '-key-ring'
        }
    }
    crypto_key = {
        'name': 'cryptoKey',
        'type': 'gcp-types/cloudkms-v1:projects.locations.keyRings.cryptoKeys',
        'properties': {
            'parent': '$(ref.keyRing.name)',
            'cryptoKeyId': context.env['deployment'] + '-crypto-key',
            'purpose': 'ENCRYPT_DECRYPT'
        }
    }
    resources = [key_ring, crypto_key]
    outputs = [{
        'name': 'primaryVersion',
        'value': '$(ref.cryptoKey.primary)'
    }]
    return {'resources': resources, 'outputs': outputs}
257b7217c1a08bba46866aff0b7faa1a03fe7fdc
709,710
def _remove_none_from_post_data_additional_rules_list(json):
    """
    Removes the hidden field value from the json field "additional_rules"
    list, which is there to ensure the field exists for editing purposes.

    :param json: the data that is going to be posted
    """
    data = json
    additional_rules = json.get("additional_rules", None)
    if additional_rules and "None" in additional_rules:
        new_additional_rules = []
        for rule in additional_rules:
            if rule != "None":
                new_additional_rules.append(rule)
        data["additional_rules"] = new_additional_rules
    return data
c82aa568f82ba4abcb8f4e6f9c770969277d078f
709,711
def NO_MERGE(writer, segments):
    """This policy does not merge any existing segments."""
    return segments
0742365f30d59cb219ac60483b867180bd910ba8
709,712
import os


def package_files(directory):
    """Recursive method which lets you set the package_data parameter in the
    setup call.
    """
    paths = []
    for (path, _, filenames) in os.walk(directory):
        for filename in filenames:
            paths.append(os.path.join('..', path, filename))
    return paths
e043de9a9e8ed9092f933df00b167b092ba6abaa
709,713
def transplant(root, u, v):
    """
    Note: root must be returned here, otherwise the change cannot take effect.
    """
    if u.parent is None:
        root = v
    elif u.parent.left == u:
        u.parent.left = v
    else:
        u.parent.right = v
    if v:
        v.parent = u.parent
    return root
cadf0433399e428596d1d0d4ab200e4d79285d21
709,714
import subprocess


def pr_branches() -> list[str]:
    """List of branches that start with 'pr-'"""
    out = subprocess.run(
        [
            "git",
            "for-each-ref",
            "--shell",
            '--format="%(refname:strip=3)"',
            "refs/remotes/origin/pr-*",
        ],
        capture_output=True,
    )
    branches = out.stdout.decode().splitlines()
    return [branch.replace('"', "").replace("'", "") for branch in branches]
f144d2546ef59cb392f4ad1226c2246384bdfd99
709,715
from typing import Union


def parse_boolean(val: str) -> Union[str, bool]:
    """Try to parse a string into boolean.

    The string is returned as-is if it does not look like a boolean value.
    """
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if val in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    return val
e2cbda5a849e1166e0f2a3953220c93d1f3ba119
709,716
def supports_color(stream) -> bool:  # type: ignore
    """Determine whether an output stream (e.g. stdout/stderr) supports
    displaying colored text.

    A stream that is redirected to a file does not support color.
    """
    # check hasattr first so the isatty() call cannot raise AttributeError
    return hasattr(stream, "isatty") and stream.isatty()
4a427d6725206ef33b3f4da0ace6f2d6c3db78a9
709,717
import os


def get_current_version_name():
    """Returns the version of the current instance.

    If this is version "v1" of module "module5" for app "my-app", this
    function will return "v1".
    """
    return os.environ['CURRENT_VERSION_ID'].split('.')[0]
cbd7fdbb9af4990e32130f2aa3af0cfe8bf59816
709,718
import re


def safe_htcondor_attribute(attribute: str) -> str:
    """Convert input attribute name into a valid HTCondor attribute name

    HTCondor ClassAd attribute names consist only of alphanumeric characters
    or underscores.  It is not clearly documented, but the alphanumeric
    characters are probably restricted to ASCII.  Attribute names created
    from multiple words typically capitalize the first letter in each word
    for readability, although all comparisons are case-insensitive.

    e.g., "central-manager" -> "CentralManager"

    Args:
        attribute: a string representing the name of an attribute

    Returns:
        The attribute name stripped of invalid characters and re-capitalized
        in the manner typical of HTCondor ClassAd attributes.

    Raises:
        None
    """
    # splitting by invalid characters removes them from the resulting array
    split_attr = re.split(r"[^\w]", attribute, flags=re.ASCII)
    safe_attr = "".join([word.capitalize() for word in split_attr if word])
    return safe_attr
7a4dda539b2379120e68737d72a80226c45f5602
709,720
def make_csv(headers, data):
    """
    Creates a CSV given a set of headers and a list of database query results

    :param headers: A list containing the first row of the CSV
    :param data: The list of query results from the Database
    :returns: A str containing a csv of the query results
    """
    # Create a list where each entry is one row of the CSV file, starting
    # with the headers
    csvRows = [','.join(headers), ]

    # Iterate through the provided data and create the rest of the CSV's rows
    for datum in data:
        currentRow = ''
        for header in headers:
            # Get this row's value for the given header
            val = getattr(datum, header)
            if type(val) is str:
                # Escape the strings
                currentRow += '"' + val + '",'
            elif type(val) is float:
                # Don't escape the floats
                currentRow += str(val) + ','
            else:
                # If it is empty, add a placeholder
                currentRow += ','
        csvRows.append(currentRow[:-1])

    # Combine all of the rows into a single string and return it.
    return "\n".join(csvRows)
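A hypothetical usage sketch; `Row` is an illustrative namedtuple standing in for a database result object, not part of the original code.

from collections import namedtuple

Row = namedtuple('Row', ['name', 'score'])  # hypothetical result type
rows = [Row('alice', 1.5), Row('bob', 2.0)]
print(make_csv(['name', 'score'], rows))
# name,score
# "alice",1.5
# "bob",2.0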
5101d53de8dd09d8ebe743d77d71bff9aeb26334
709,721
from typing import Tuple


def extract_value_from_config(
        config: dict,
        keys: Tuple[str, ...],
):
    """
    Traverse a config dictionary to get some hyper-parameter's value.

    Parameters
    ----------
    config
        A config dictionary.
    keys
        The possible names of a hyper-parameter.

    Returns
    -------
    A list of all values found for the hyper-parameter.
    """
    result = []
    for k, v in config.items():
        if k in keys:
            result.append(v)
        elif isinstance(v, dict):
            result += extract_value_from_config(v, keys)
    return result
d545d4c9298c74776ec52fb6b2c8d54d0e653489
709,722
import numpy


def boundaryStats(a):
    """
    Returns the minimum and maximum values of a only on the boundaries of the
    array.
    """
    amin = numpy.amin(a[0, :])
    amin = min(amin, numpy.amin(a[1:, -1]))
    amin = min(amin, numpy.amin(a[-1, :-1]))
    amin = min(amin, numpy.amin(a[1:-1, 0]))
    amax = numpy.amax(a[0, :])
    amax = max(amax, numpy.amax(a[1:, -1]))
    amax = max(amax, numpy.amax(a[-1, :-1]))
    amax = max(amax, numpy.amax(a[1:-1, 0]))
    return amin, amax
6c007c6cf2c7c5774ca74365be8f63094864d962
709,723
def vision_matched_template_get_pose(template_match):
    """
    Get the pose of a previously detected template match. Use list operations
    to get specific entries; otherwise the value of the first entry is
    returned.

    Parameters:
    template_match (List[MatchedTemplate3D] or MatchedTemplate3D): The template match(es)

    Return (Pose): The pose of the template match
    """
    if isinstance(template_match, list):
        template_match = template_match[0]
    return template_match.pose.pose.pose
b854da7a085934f4f3aba510e76852fb8c0a440a
709,724
def no_zero(t):
    """
    This function replaces all zeros in a tensor with ones. This allows us to
    take the logarithm and then sum over all values in the matrix.

    Args:
        t: tensor to be replaced
    returns:
        t: tensor with ones instead of zeros.
    """
    t[t == 0] = 1.
    return t
8119d1859dc8b248f5bb09b7cc0fc3b492d9b7bd
709,726
def get_vocabulary(query_tree):
    """Extracts the normalized search terms from the leaf nodes of a parsed
    query to construct the vocabulary for the text vectorization.

    Arguments
    ---------
    query_tree: pythonds.trees.BinaryTree
        The binary tree object representing a parsed search query. Each leaf
        node is a search term and internal nodes represent boolean
        operations. See parse_query() for details.

    Returns
    -------
    vocabulary: list
        List of strings representing unique normalized search terms.
    """
    def _getleafnodes(node):
        terms = []
        if node.isLeaf():
            return terms + [node.normedterm]
        elif node.leftChild and not node.rightChild:
            return terms + _getleafnodes(node.getLeftChild())
        elif node.rightChild and not node.leftChild:
            return terms + _getleafnodes(node.getRightChild())
        else:  # has two children
            return terms + _getleafnodes(node.getLeftChild()) \
                         + _getleafnodes(node.getRightChild())

    # extract terms from the leaf nodes of the query object.
    terms = _getleafnodes(query_tree)

    # remove duplicates.
    vocabulary = list(set(terms))
    return vocabulary
bd03f4894cd3f9a7964196bfb163335f84a048d7
709,728
def find_point_in_section_list(point, section_list):
    """Returns the start of the section the given point belongs to.

    The given list is assumed to contain start points of consecutive
    sections, except for the final point, assumed to be the end point of the
    last section. For example, the list [5, 8, 30, 31] is interpreted as the
    following list of sections: [5-8), [8-30), [30-31], so the points -32,
    4.5, 32 and 100 all match no section, while 5 and 7.5 match [5-8) and so
    for them the function returns 5, and 30, 30.7 and 31 all match [30-31].

    Parameters
    ---------
    point : float
        The point for which to match a section.
    section_list : sortedcontainers.SortedList
        A list of start points of consecutive sections.

    Returns
    -------
    float
        The start of the section the given point belongs to. None if no
        match was found.

    Example
    -------
    >>> from sortedcontainers import SortedList
    >>> seclist = SortedList([5, 8, 30, 31])
    >>> find_point_in_section_list(4, seclist)
    >>> find_point_in_section_list(5, seclist)
    5
    >>> find_point_in_section_list(27, seclist)
    8
    >>> find_point_in_section_list(31, seclist)
    30
    """
    if point < section_list[0] or point > section_list[-1]:
        return None
    if point in section_list:
        if point == section_list[-1]:
            return section_list[-2]
        ind = section_list.bisect(point) - 1
        if ind == 0:
            return section_list[0]
        return section_list[ind]
    try:
        ind = section_list.bisect(point)
        return section_list[ind - 1]
    except IndexError:
        return None
47d5cda15b140ba8505ee658fd46ab090b2fda8a
709,729
import os
import types


def generate_module(file_allocator, name):
    """
    Generate an in-memory module from a generated Python implementation.
    """
    assert name in file_allocator.allocated_files
    f = file_allocator.allocated_files[name]
    f.seek(0)
    data = f.read()

    modname, _ = os.path.splitext(name)
    d = {}
    eval(compile(data, name, "exec"), d, d)

    m = types.ModuleType(modname)
    vars(m).update(d)
    return m
beab4cdf12fcdfeacef9f8a8607e995b771d6012
709,730
def choose(population, sample):
    """
    Returns ``population`` choose ``sample``, given by: n! / k!(n-k)!, where
    n == ``population`` and k == ``sample``.
    """
    if sample > population:
        return 0
    s = max(sample, population - sample)
    assert s <= population
    assert population > -1
    if s == population:
        return 1
    numerator = 1
    denominator = 1
    for i in range(s + 1, population + 1):
        numerator *= i
        denominator *= (i - s)
    # use floor division so the result stays an exact integer in Python 3
    return numerator // denominator
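A quick check, including the symmetry C(n, k) == C(n, n-k):

print(choose(5, 2))  # 10
print(choose(5, 3))  # 10
print(choose(5, 6))  # 0 (sample larger than population)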
659eac683cae737888df74c0db21aa3ece746b33
709,731
def eea(m, n):
    """
    Compute numbers a, b such that a*m + b*n = gcd(m, n) using the Extended
    Euclidean algorithm.
    """
    p, q, r, s = 1, 0, 0, 1
    while n != 0:
        k = m // n
        m, n, p, q, r, s = n, m - k*n, q, p - k*q, s, r - k*s
    return (p, r)
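A quick sanity check of the Bezout identity a*m + b*n == gcd(m, n):

import math

a, b = eea(12, 8)
assert a * 12 + b * 8 == math.gcd(12, 8)  # 1*12 + (-1)*8 == 4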
56e1c59ac3a51e26d416fe5c65cf6612dbe56b8c
709,733
import csv


def parse_kinetics_splits(level):
    """Parse Kinetics-400 dataset into "train", "val", "test" splits.

    Args:
        level (int): Directory level of data. 1 for the single-level
            directory, 2 for the two-level directory.

    Returns:
        list: "train", "val", "test" splits of Kinetics-400.
    """

    def convert_label(s, keep_whitespaces=False):
        """Convert label name to a formal string.

        Remove redundant '"' and convert whitespace to '_'.

        Args:
            s (str): String to be converted.
            keep_whitespaces(bool): Whether to keep whitespace. Default: False.

        Returns:
            str: Converted string.
        """
        if not keep_whitespaces:
            return s.replace('"', '').replace(' ', '_')
        else:
            return s.replace('"', '')

    def line_to_map(x, test=False):
        """A function to map line string to vid and label.

        Args:
            x (str): A single line from Kinetics-400 csv file.
            test (bool): Indicate whether the line comes from test
                annotation file.

        Returns:
            tuple[str, str]: (vid, label), vid is the video id, label is
                the video label.
        """
        if test:
            # vid = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}'
            vid = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
            label = -1  # label unknown
            return vid, label
        else:
            vid = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
            if level == 2:
                vid = f'{convert_label(x[0])}/{vid}'
            else:
                assert level == 1
            label = class_mapping[convert_label(x[0])]
            return vid, label

    train_file = 'data/kinetics400/annotations/kinetics_train.csv'
    val_file = 'data/kinetics400/annotations/kinetics_val.csv'
    test_file = 'data/kinetics400/annotations/kinetics_test.csv'

    csv_reader = csv.reader(open(train_file))
    # skip the first line
    next(csv_reader)

    labels_sorted = sorted(set([convert_label(row[0]) for row in csv_reader]))
    class_mapping = {label: i for i, label in enumerate(labels_sorted)}

    csv_reader = csv.reader(open(train_file))
    next(csv_reader)
    train_list = [line_to_map(x) for x in csv_reader]

    csv_reader = csv.reader(open(val_file))
    next(csv_reader)
    val_list = [line_to_map(x) for x in csv_reader]

    csv_reader = csv.reader(open(test_file))
    next(csv_reader)
    test_list = [line_to_map(x, test=True) for x in csv_reader]

    splits = ((train_list, val_list, test_list), )
    return splits
ee2521919f9f9c3f499cd28bc6003528eb402d2b
709,736
import uuid


def ticket() -> str:
    """Generate the ticket parameter required for ebai (饿百) API requests."""
    return str(uuid.uuid1()).upper()
aaf1135d6ef5e61aa65960c5c38007848cbd0b17
709,737
def get_clients(wlc, *vargs, **kvargs):
    """
    Create a single dictionary containing information about all associated
    stations.
    """
    rsp = wlc.rpc.get_stat_user_session_status()
    ret_data = {}
    for session in rsp.findall('.//USER-SESSION-STATUS'):
        locstat = session.find('.//USER-LOCATION-MEMBER')
        ret_data[session.get('mac-addr')] = dict(session.attrib)
        ret_data[session.get('mac-addr')].update(locstat.attrib)
    return ret_data
c4ab5941033632d7f2b95bc23878f0464d12adb7
709,738
def maximum_value(tab):
    """
    brief: return maximum value of the list
    args:
        tab: a list of numeric values; expects at least one positive value
    return:
        the max value of the list
        the index of the max value
    raises:
        ValueError if the input is not a list
        ValueError if no positive value found
    """
    if not isinstance(tab, list):
        raise ValueError('Expected a list as input')
    valMax = 0.0
    valMaxIndex = -1
    nPositiveValues = 0
    for i in range(len(tab)):
        if tab[i] >= 0 and tab[i] > valMax:
            valMax = float(tab[i])
            valMaxIndex = i
            nPositiveValues += 1
    if nPositiveValues <= 0:
        raise ValueError('No positive value found')
    return valMax, valMaxIndex
1c31daf3a953a9d781bc48378ef53323313dc22a
709,739
import math


def dsh(
        incidence1: float, solar_az1: float, incidence2: float, solar_az2: float
):
    """Returns the Shadow-Tip Distance (dsh) as detailed in
    Becker et al. (2015).

    The input angles are assumed to be in radians.

    This is defined as the distance between the tips of the shadows in the
    two images for a hypothetical vertical post of unit height. The "shadow
    length" describes the shadow of a hypothetical pole so it applies whether
    there are actually shadows in the image or not. It's a simple and
    consistent geometrical way to quantify the difference in illumination.
    This quantity is computed analogously to dp.
    """
    def shx(inc: float, sunazgnd: float):
        return -1 * math.tan(inc) * math.cos(sunazgnd)

    def shy(inc: float, sunazgnd: float):
        return math.tan(inc) * math.sin(sunazgnd)

    shx1 = shx(incidence1, solar_az1)
    shx2 = shx(incidence2, solar_az2)
    shy1 = shy(incidence1, solar_az1)
    shy2 = shy(incidence2, solar_az2)

    return math.sqrt(math.pow(shx1 - shx2, 2) + math.pow(shy1 - shy2, 2))
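A small sketch of the geometry: identical illumination gives a shadow-tip distance of zero, while rotating the solar azimuth by 90 degrees at 30-degree incidence gives tan(30)*sqrt(2):

import math

inc = math.radians(30.0)
print(dsh(inc, 0.0, inc, 0.0))                 # 0.0
print(dsh(inc, 0.0, inc, math.radians(90.0)))  # ~0.816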
5aef1c9d7ffeb3e8534568a53cf537d26d97324a
709,740
import importlib


def instantiate(class_name, *args, **kwargs):
    """Helper to dynamically instantiate a class from a dotted name."""
    # split at the last dot so nested modules (e.g. "pkg.mod.Class") resolve
    module_name, _, class_name = class_name.rpartition(".")
    module = importlib.import_module(module_name)
    class_ = getattr(module, class_name)
    return class_(*args, **kwargs)
d5906c835de9c2e86fbe3c15a9236662d6c7815d
709,741
import six


def _stringcoll(coll):
    """
    Predicate function to determine whether COLL is a non-empty collection
    (list/tuple) containing only strings.

    Arguments:
    - `coll`: the object to check

    Return: bool
    Exceptions: None
    """
    if isinstance(coll, (list, tuple)) and coll:
        return len([s for s in coll if isinstance(s, six.string_types)]) == len(coll)
    return False
9490a973900e230f70fea112f250cfe29be3a8bc
709,742
def __check_complete_list(list_, nb_max, def_value):
    """
    Make sure the list is long enough, completing it with the default value
    if it is not.

    :param list_: list to check
    :param nb_max: maximum length of the list
    :param def_value: if the list is too small, complete it with this value
    :return: boolean, False if the list is too long
    """
    if len(list_) <= nb_max:
        list_.extend([def_value] * (nb_max - len(list_)))
        return True
    else:
        return False
9d439cd3eeea04e7a3e0e59aa4fe0bbb875bdfe4
709,743
def verify_parentheses(parentheses_string: str) -> bool:
    """Takes input string of only '{},[],()' and evaluates to True if valid."""
    open_parentheses = []
    valid_parentheses_set = {'(', ')', '[', ']', '{', '}'}
    parentheses_pairs = {
        ')': '(',
        ']': '[',
        '}': '{'
    }

    if len(parentheses_string) % 2 != 0:
        return False

    for character in parentheses_string:
        if character not in valid_parentheses_set:
            raise ValueError("Only parentheses may be part of input string.")
        if character in {'(', '[', '{'}:
            open_parentheses.append(character)
        if character in {')', ']', '}'}:
            if len(open_parentheses) == 0:
                return False
            elif open_parentheses[-1] != parentheses_pairs[character]:
                return False
            del open_parentheses[-1]

    if len(open_parentheses) > 0:
        return False

    return True
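A few illustrative calls:

print(verify_parentheses("{[()]}"))  # True
print(verify_parentheses("([)]"))    # False (wrong nesting)
print(verify_parentheses("(("))      # False (unclosed)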
2e2c07314d474b582f12af8cf53a311c0fa323c1
709,745
import re


def compute_dict(file_path):
    """Computes the dict for a file whose path is file_path"""
    file_dict = {}
    with open(file_path, encoding='utf8') as fin:
        for line in fin:
            line = line.strip()
            # raw strings avoid invalid-escape warnings in the regexes
            txt = re.sub(r'([^a-zA-Z0-9\s]+)', ' \\1 ', line)
            txt = re.sub(r'([\s]+)', ' ', txt)
            words = txt.split(" ")
            for word in words:
                w = str(word)
                if w not in file_dict:
                    file_dict[w] = 1
                else:
                    file_dict[w] = file_dict[w] + 1
    return file_dict
821e29181aad781279b27174be0fd7458b60481f
709,747
def isMatch(s, p):
    """
    Perform simple regular expression matching

    Given an input string s and a pattern p, run regular expression matching
    with support for '.' and '*'.

    Parameters
    ----------
    s : str
        The string to match.
    p : str
        The pattern to match.

    Returns
    -------
    bool
        Was it a match or not.
    """
    dp = [[False] * (len(p) + 1) for _ in range(len(s) + 1)]

    dp[0][0] = True

    # The only way to match a length zero string
    # is to have a pattern of all *'s.
    for ii in range(1, len(p)):
        if p[ii] == "*" and dp[0][ii - 1]:
            dp[0][ii + 1] = True

    for ii in range(len(s)):
        for jj in range(len(p)):
            # Matching a single character c or '.'.
            if p[jj] in {s[ii], '.'}:
                dp[ii + 1][jj + 1] = dp[ii][jj]
            elif p[jj] == '*':
                # Double **, which is equivalent to *
                if p[jj - 1] not in {s[ii], '.'}:
                    dp[ii + 1][jj + 1] = dp[ii + 1][jj - 1]
                # We can match .* or c* multiple times, once, or zero
                # times (respective clauses in the or's)
                else:
                    dp[ii + 1][jj + 1] = dp[ii][jj + 1] or dp[ii + 1][jj] or dp[ii + 1][jj - 1]

    return dp[-1][-1]
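A couple of classic cases for this DP:

print(isMatch("aab", "c*a*b"))               # True ('c*' matches zero c's, 'a*' two a's)
print(isMatch("mississippi", "mis*is*p*."))  # False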
92cd3171afeb73c6a58bbcd3d3ea6d707401cb09
709,748
from typing import Optional


def convert_postgres_array_as_string_to_list(array_as_string: str) -> Optional[list]:
    """
    Postgres arrays are stored in CSVs as strings. Elasticsearch is able to
    handle lists of items, but needs to be passed a list instead of a string.
    In the case of an empty array, return null.

    For example, "{this,is,a,postgres,array}" ->
    ["this", "is", "a", "postgres", "array"].
    """
    return array_as_string[1:-1].split(",") if len(array_as_string) > 2 else None
cc64fe8e0cc765624f80abc3900985a443f76792
709,749
def add_prefix(key):
    """Dummy key_function for testing index code."""
    return "id_" + key
96dda0bd57b4eb89f17c8bb69ad48e3e1675a470
709,750
def binaryToString(binary):
    """
    Convert a binary string to a UTF-8 string.
    """
    index = 0
    string = []
    rec = lambda x, i: x[2:8] + (rec(x[8:], i - 1) if i > 1 else '') if x else ''
    fun = lambda x, i: x[i + 1:8] + rec(x[8:], i - 1)
    while index + 1 < len(binary):
        # number of bytes the character occupies; single-byte characters are stored as 0
        chartype = binary[index:].index('0')
        length = chartype * 8 if chartype else 8
        string.append(chr(int(fun(binary[index:index + length], chartype), 2)))
        index += length
    return ''.join(string)
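A small round-trip sketch for plain ASCII input, where each character is one byte with a leading 0 bit:

bits = ''.join(format(ord(c), '08b') for c in "hi")
print(binaryToString(bits))  # 'hi'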
2044109d573abe7c9428b64b289b5aa82ec4d624
709,751
import functools
import logging


def disable_log_warning(fun):
    """Temporarily set FTP server's logging level to ERROR."""
    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        logger = logging.getLogger('pyftpdlib')
        level = logger.getEffectiveLevel()
        logger.setLevel(logging.ERROR)
        try:
            return fun(self, *args, **kwargs)
        finally:
            logger.setLevel(level)
    return wrapper
6990a2a1a60ea5a24e4d3ac5c5e7fbf443825e48
709,752
import json
import operator


def my_subs_helper(s):
    """Helper function to handle badly formed JSON stored in the database"""
    try:
        return {'time_created': s.time_created,
                'json_obj': sorted(json.loads(s.json_data).items(),
                                   key=operator.itemgetter(0)),
                'plain_json_obj': json.dumps(json.loads(s.json_data)),
                'id': s.id,
                'json_score_data': json.dumps(s.json_score_data)}
    except ValueError:
        return {'time_created': s.time_created, 'json_obj': "__ERROR__",
                'plain_json_obj': "__ERROR__", 'id': s.id}
4b649d865c3a99f89111baa694df4902e65243e6
709,753
def add_to_codetree(tword, codetree, freq=1):
    """
    Adds one tuple-word to the tree structure - one node per symbol.
    A word end in the tree is characterized by node[0] > 0.
    """
    unique = 0
    for pos in range(len(tword)):
        s = tword[pos]
        if s not in codetree:
            codetree[s] = [0, {}]
            unique += 1
        codetree[s][0] += freq
        codetree = codetree[s][1]
    return unique
e92a48f112e7a774bed3b125509f7f64dce0a7ec
709,754
def remove_from_dict(obj, keys=list(), keep_keys=True):
    """
    Prune a class or dictionary of all but keys (keep_keys=True).
    Prune a class or dictionary of specified keys (keep_keys=False).
    """
    if isinstance(obj, dict):
        items = list(obj.items())
    else:
        items = list(obj.__dict__.items())

    if keep_keys:
        return {k: v for k, v in items if k in keys}
    else:
        return {k: v for k, v in items if k not in keys}
b1d9a2bd17269e079ce136cc464060fc47fe5906
709,755
def stringToTupleOfFloats(s):
    """
    Converts s to a tuple

    @param s: string
    @return: tuple represented by s
    """
    ans = []
    for i in s.strip("()").split(","):
        i = i.strip()  # strip once so the comparisons below see the bare token
        if i != "":
            if i == "null":
                ans.append(None)
            else:
                ans.append(float(i))
    return tuple(ans)
7eec23232f884035b12c6498f1e68616e4580878
709,756
import sys


def factorial(x):
    """factorial(x) -> Integral

    Find x!. Raise a ValueError if x is negative or non-integral.
    """
    if isinstance(x, float):
        fl = int(x)
        if fl != x:
            raise ValueError("float arguments must be integral")
        x = fl
    if x > sys.maxsize:
        raise OverflowError("Too large for a factorial")

    if x <= 100:
        if x < 0:
            raise ValueError("x must be >= 0")
        res = 1
        for i in range(2, x + 1):
            res *= i
        return res

    # Experimentally this gap seems good
    gap = max(100, x >> 7)

    def _fac_odd(low, high):
        if low + gap >= high:
            t = 1
            for i in range(low, high, 2):
                t *= i
            return t
        mid = ((low + high) >> 1) | 1
        return _fac_odd(low, mid) * _fac_odd(mid, high)

    def _fac1(x):
        if x <= 2:
            return 1, 1, x - 1
        x2 = x >> 1
        f, g, shift = _fac1(x2)
        g *= _fac_odd((x2 + 1) | 1, x + 1)
        return (f * g, g, shift + x2)

    res, _, shift = _fac1(x)
    return res << shift
664cc8e0e215f089bbc57fec68553d788305e4c0
709,757
import json


def read_json(file_name):
    """Read json from file."""
    with open(file_name) as f:
        return json.load(f)
2eccab7dddb1c1038de737879c465f293a00e5de
709,758
def _decode_end(_fp):
    """Decode the end tag, which has no data in the file, returning 0.

    :type _fp: A binary `file object`
    :rtype: int
    """
    return 0
5e8da3585dda0b9c3c7cd428b7e1606e585e15c6
709,759
def get_camelcase_name_chunks(name):
    """
    Given a name, get its parts.
    E.g: maxCount -> ["max", "count"]
    """
    out = []
    out_str = ""
    for c in name:
        if c.isupper():
            if out_str:
                out.append(out_str)
            out_str = c.lower()
        else:
            out_str += c
    out.append(out_str)
    return out
134a8b1d98af35f185b37c999fbf499d18bf76c5
709,760
def orb_scf_input(sdmc):
    """ find the scf inputs used to generate sdmc """
    myinputs = None  # this is the goal
    sdep = 'dependencies'  # string representation of the dependencies entry

    # step 1: find the p2q simulation id
    p2q_id = None
    for key in sdmc[sdep].keys():
        if sdmc[sdep][key].result_names[0] == 'orbitals':
            p2q_id = key
        # end if
    # end for dep

    # step 2: find the nscf simulation
    nscf_id_list = list(sdmc[sdep][p2q_id]['sim'][sdep].keys())
    assert len(nscf_id_list) == 1
    nscf_id = nscf_id_list[0]
    nscf = sdmc[sdep][p2q_id]['sim'][sdep][nscf_id]
    myinputs = nscf['sim']['input']

    # step 3: find the scf simulation
    calc = myinputs['control']['calculation']
    if calc == 'scf':  # scf may actually be the scf simulation
        pass  # myinputs is already set
    elif calc == 'nscf':  # if nscf is not the scf, then we need to go deeper
        scf_id = list(nscf['sim'][sdep].keys())[0]
        scf = nscf['sim'][sdep][scf_id]
        myinputs = scf['sim']['input']  # this is it!
        scalc = myinputs['control']['calculation']
        if scalc != 'scf':
            raise RuntimeError('nscf depends on %s instead of scf' % scalc)
        # end if
    else:
        raise RuntimeError('unknown simulation type %s' % calc)
    # end if

    return myinputs.to_dict()
c319693e9673edf540615025baf5b5199c5e27a3
709,762
def is_success(code):
    """
    Returns True for the HTTP response codes treated as expected for HTTP
    GET requests (2xx, plus 404 and 500).

    :param code: HTTP response code
    :type code: int
    """
    if (200 <= code < 300) or code in [404, 500]:
        return True
    return False
fa502b4989d80edc6e1c6c717b6fe1347f99990d
709,763
import json


def get_node_to_srn_mapping(match_config_filename):
    """
    Returns the node-to-srn map from match_conf.json
    """
    with open(match_config_filename) as config_file:
        config_json = json.loads(config_file.read())

    if "node_to_srn_mapping" in config_json:
        return config_json["node_to_srn_mapping"]
    else:
        node_to_srn = {}
        for node_info in config_json["NodeData"]:
            node_id = node_info["TrafficNode"]
            srn_num = node_info["srn_number"]
            node_to_srn[node_id] = srn_num
        return node_to_srn
37bf2f266f4e5163cc4d6e9290a8eaf17e220cd3
709,765
def nest_dictionary(flat_dict, separator):
    """
    Nests a given flat dictionary.

    Nested keys are created by splitting given keys around the `separator`.
    """
    nested_dict = {}
    for key, val in flat_dict.items():
        split_key = key.split(separator)
        act_dict = nested_dict
        final_key = split_key.pop()
        for new_key in split_key:
            if new_key not in act_dict:
                act_dict[new_key] = {}
            act_dict = act_dict[new_key]
        act_dict[final_key] = val
    return nested_dict
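A short illustration with a dot separator:

flat = {'a.b': 1, 'a.c': 2, 'd': 3}
print(nest_dictionary(flat, '.'))  # {'a': {'b': 1, 'c': 2}, 'd': 3}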
f5b8649d916055fa5911fd1f80a8532e5dbee274
709,766
def list_a_minus_b(list1, list2):
    """Given two lists, A and B, returns A-B."""
    # a list comprehension returns an actual list on Python 3,
    # where filter() would return a lazy iterator
    return [x for x in list1 if x not in list2]
8fbac6452077ef7cf73e0625303822a35d0869c3
709,767
def gt_dosage(gt):
    """Convert unphased genotype to dosage"""
    x = gt.split(b'/')
    return int(x[0]) + int(x[1])
819fc9beb834f57e44bcb0ac3e1d3c664c7efd42
709,768
import csv
import re
import sys


def read_mapping_file(map_file):
    """
    Mappings are simply a CSV file with three columns.
    The first is a string to be matched against an entry description.
    The second is the payee against which such entries should be posted.
    The third is the account against which such entries should be posted.

    If the match string begins and ends with '/' it is taken to be a
    regular expression.
    """
    mappings = []
    with open(map_file, "r", encoding='utf-8', newline='') as f:
        map_reader = csv.reader(f)
        for row in map_reader:
            # require all three columns, since row[2] is accessed below
            if len(row) > 2:
                pattern = row[0].strip()
                payee = row[1].strip()
                account = row[2].strip()
                tags = row[3:]
                if pattern.startswith('/') and pattern.endswith('/'):
                    try:
                        pattern = re.compile(pattern[1:-1])
                    except re.error as e:
                        print("Invalid regex '{0}' in '{1}': {2}"
                              .format(pattern, map_file, e),
                              file=sys.stderr)
                        sys.exit(1)
                mappings.append((pattern, payee, account, tags))
    return mappings
e72ceb08daac0a12a426062f95cfa06776cfdedd
709,769
import os


def GetEnvironFallback(var_list, default):
    """Look up a key in the environment, with fallback to secondary keys
    and finally falling back to a default value."""
    for var in var_list:
        if var in os.environ:
            return os.environ[var]
    return default
1b9cad3c46264c089f250ccb19119cff8cacd0d1
709,770
def task_migrate():
    """Create django databases"""
    return {
        'actions': ['''cd CCwebsite && python3 manage.py migrate''']
    }
d0d146c2e628abbe33714ae0ff6a546aab9842cc
709,771
import os


def print_header(args, argv, preamble='CIFAR10', printfn=print,
                 log=open(os.devnull, 'w'),
                 first=('model', 'dataset', 'epoch', 'batchsize', 'resume', 'out')):
    """
    Prints the arguments and header, and returns a logging print function
    """
    def logprint(*args, file=log, **kwargs):
        if printfn:
            printfn(*args, **kwargs)
        print(*args, file=file, **kwargs)
        file.flush()

    vargs = vars(args)
    args_sorted = sorted(vargs.items())
    logprint('{' + ', '.join("'{}':{}".format(k, repr(v)) for k, v in args_sorted) + '}')
    logprint(' '.join(argv))
    logprint('')
    logprint(preamble)
    logprint('')
    logprint('Arguments: ')

    def print_arg(arg):
        logprint('    {:20}: {},'.format("'%s'" % arg, repr(vargs[arg])))

    for arg in first:
        print_arg(arg)
    logprint('')
    for arg, _ in args_sorted:
        if arg in first:
            continue
        print_arg(arg)
    logprint('')

    return logprint
c1213f441696dbabedafe9888a681cf64bab4249
709,772
def tempo_para_percorrer_uma_distancia(distancia, velocidade):
    """
    Receives a distance and a movement speed, and returns the hours it
    would take to travel that distance in a straight line.
    """
    horas = distancia / velocidade
    return round(horas, 2)
e7754e87e010988284a6f89497bb1c5582ea0e85
709,773
def find_last_index(l, x):
    """Returns the last index of element x within the list l"""
    for idx in reversed(range(len(l))):
        if l[idx] == x:
            return idx
    raise ValueError("'{}' is not in list".format(x))
f787b26dd6c06507380bf2e336a58887d1f1f7ea
709,774
def _CheckUploadStatus(status_code):
    """Validates that HTTP status for upload is 2xx."""
    # floor division: with Python 3's true division, 201 / 100 == 2.01 != 2
    return status_code // 100 == 2
d799797af012e46945cf413ff54d2ee946d364ba
709,776
def identifyEntity(tweet, entities):
    """
    Identify the target entity of the tweet from the list of entities

    :param tweet:
    :param entities:
    :return:
    """
    best_score = 0  # best score over all entities
    targetEntity = ""  # the entity corresponding to the best score

    for word in tweet:
        for entity in entities:
            cur_score = 0  # the score for the current entity
            if word == entity:
                # set the current score to 1 in case the entity name is
                # mentioned in the tweet
                cur_score = 1
            for entity_related_word in entities[entity]:
                if word == entity_related_word:
                    # increment the current score by 1 in case a related term
                    # to the current entity is mentioned in the tweet
                    cur_score = cur_score + 1
            if cur_score > best_score:
                # update the best score and the target entity
                best_score = cur_score
                targetEntity = entity

    return targetEntity
d6825dfddf01706ee266e0f1c82128a42bcb8554
709,777
def _apply_D_loss(scores_fake, scores_real, loss_func):
    """Compute Discriminator losses and normalize loss values

    Arguments
    ---------
    scores_fake : list
        discriminator scores of generated waveforms
    scores_real : list
        discriminator scores of groundtruth waveforms
    loss_func : object
        object of target discriminator loss
    """
    loss = 0
    real_loss = 0
    fake_loss = 0
    if isinstance(scores_fake, list):
        # multi-scale loss: use distinct per-scale names so the unpacking
        # does not overwrite the running totals
        for score_fake, score_real in zip(scores_fake, scores_real):
            total_loss, cur_real_loss, cur_fake_loss = loss_func(
                score_fake=score_fake, score_real=score_real
            )
            loss += total_loss
            real_loss += cur_real_loss
            fake_loss += cur_fake_loss
        # normalize loss values with number of scales (discriminators)
        # loss /= len(scores_fake)
        # real_loss /= len(scores_real)
        # fake_loss /= len(scores_fake)
    else:
        # single scale loss
        total_loss, real_loss, fake_loss = loss_func(scores_fake, scores_real)
        loss = total_loss
    return loss, real_loss, fake_loss
9432962af57193c07a268d00a3f1f01d372cb6a0
709,778
import random


def pick_op(r, maxr, w, maxw):
    """Choose a read or a write operation"""
    if r == maxr or random.random() >= float(w) / maxw:
        return "write"
    else:
        return "read"
a45f53bf12538412b46f78e2c076966c26cf61ac
709,779
import os


def isvalid(save_path, file):
    """
    Returns true if the file described by the parameters is a file with the
    appropriate file extension.
    """
    return os.path.isfile(os.path.join(save_path, file)) and \
        str(file).endswith('.meta')
55f76212eaaae3be6706a01f3f28d24005d28f75
709,781
def min_index(array, i, j):
    """Helper function for selection sort.
    Returns the index of the smallest element in 'array'
    between 'i' and 'j'-1.
    """
    index = i
    for k in range(i, j):
        if array[k] < array[index]:
            index = k
    return index
4c59362fac2e918ba5a0dfe9f6f1670b3e95d68c
709,782
def average_precision(gt, pred):
    """
    Computes the average precision.

    This function computes the average precision at k between two lists of
    items.

    Parameters
    ----------
    gt: set
        A set of ground-truth elements (order doesn't matter)
    pred: list
        A list of predicted elements (order does matter)

    Returns
    -------
    score: double
        The average precision over the input lists
    """
    if not gt:
        return 0.0

    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(pred):
        if p in gt and p not in pred[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)

    return score / max(1.0, len(gt))
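A worked example: hits at ranks 1 and 3 give precisions 1/1 and 2/3, averaged over the two ground-truth items.

print(average_precision({'a', 'b'}, ['a', 'x', 'b']))  # (1/1 + 2/3) / 2 ~= 0.833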
ca265471d073b6a0c7543e24ef0ba4f872737997
709,784
import math


def rotate_coo(x, y, phi):
    """Rotate the coordinates in the *.coo files for data sets
    containing images at different PAs.
    """
    # Rotate around center of image, and keep origin at center
    xin = 512.
    yin = 512.
    xout = 512.
    yout = 512.

    cos = math.cos(math.radians(phi))
    sin = math.sin(math.radians(phi))

    xrot = (x - xin) * cos - (y - yin) * sin + xout
    yrot = (x - xin) * sin + (y - yin) * cos + yout

    return [xrot, yrot]
a57a4c36119e96d757bd23f28a0790f6d68661fc
709,785
def ip_block_array():
    """ Return an ipBlock array instance fixture """
    return ['10.0.0.1', '10.0.0.2', '10.0.0.3']
c74756f34b97d2550cb238bd63e0c9505f3935d3
709,786
import os


def _find_modules_and_directories(top_level_directory):
    """
    Recursive helper function to find all python files included in top level
    package. This will recurse down the directory paths of any package to
    find all modules and subpackages in order to create an exhaustive list
    of all python files within a given package.

    :param top_level_directory: Path to the top level of a python package.
    :type top_level_directory: str
    :return: Returns a list of paths to all python files within that package.
    :rtype: list [str]
    """
    modules = []
    directories = []
    for item in os.listdir(top_level_directory):
        if item.endswith(".py"):
            modules.append(os.path.join(top_level_directory, item))
        elif os.path.isdir(os.path.join(top_level_directory, item)):
            directories.append(os.path.join(top_level_directory, item))
    for directory in directories:
        modules.extend(_find_modules_and_directories(directory))
    return modules
2aecb5974f83ce01b2a8e4a6fb8313399756c1d4
709,787
import math
import torch


def log_density_gaussian(x, mu, logvar):
    """Calculates log density of a gaussian.

    Parameters
    ----------
    x: torch.Tensor or np.ndarray or float
        Value at which to evaluate the density.
    mu: torch.Tensor or np.ndarray or float
        Mean.
    logvar: torch.Tensor or np.ndarray or float
        Log variance.
    """
    normalization = -0.5 * (math.log(2 * math.pi) + logvar)
    inv_var = torch.exp(-logvar)
    log_density = normalization - 0.5 * ((x - mu)**2 * inv_var)
    return log_density
3fdc751aa58b3ec82e1aa454f593879d5da4c310
709,788
def split_page(array, limit, index):
    """
    Split an array into pages of the given size and return the page the
    index points to.

    :param array: the array to split
    :param limit: the size of each page
    :param index: which page of the split array to return (1-based)
    :return: list
    """
    end = index * limit
    start = end - limit
    return array[start:end]
ecce83d6e2e09d47e124536f294ece1e1631e6b6
709,789
def Flatten(matrix):
    """Flattens a 2d array 'matrix' to an array."""
    array = []
    for a in matrix:
        array += a
    return array
00389b4dd295274d8081331d6ae78f233f0b5b59
709,790
def get_ws_param(args, attr):
    """get the corresponding warm start parameter; if it does not exist,
    use the value of the general parameter"""
    assert hasattr(args, attr), 'Invalid warm start parameter!'
    val = getattr(args, attr)
    if hasattr(args, 'ws_' + attr):
        ws_val = getattr(args, 'ws_' + attr)
        if isinstance(ws_val, str):
            ws_val = ws_val.strip()
        if ws_val or isinstance(ws_val, (list, int, float)):
            val = ws_val
    return val
ea1d762654153602f8ad54048e54995c26304e40
709,791
def escape_name(name):
    """Escape sensor and request names to be valid Python identifiers."""
    return name.replace('.', '_').replace('-', '_')
856b8fe709e216e027f5ab085dcab91604c93c2e
709,792
def multiset_counter(mset):
    """
    Return the sum of occurrences of elements present in a token ids
    multiset, aka. the multiset cardinality.
    """
    return sum(mset.values())
36885abd5bf666aa6c77a262a647c227e46d2e88
709,793
def lengthenFEN(fen):
    """Lengthen FEN to 71-character form (ex. '3p2Q' becomes '111p11Q')"""
    return fen.replace('8', '11111111').replace('7', '1111111') \
              .replace('6', '111111').replace('5', '11111') \
              .replace('4', '1111').replace('3', '111').replace('2', '11')
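A quick check: each digit expands to that many '1' placeholders, so a full starting-position board string (8 ranks plus 7 slashes) comes out at 71 characters.

print(lengthenFEN('3p2Q'))  # '111p11Q'
print(len(lengthenFEN('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR')))  # 71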
f49cdf8ad6919fbaaad1abc83e24b1a33a3ed3f8
709,794
import os
import pickle


def load_flags(save_dir, save_file="flags.obj"):
    """
    This function inflates the pickled object to a flags object for reuse,
    typically during evaluation (after training).

    :param save_dir: The place where the obj is located
    :param save_file: The file name of the file, usually flags.obj
    :return: flags
    """
    with open(os.path.join(save_dir, save_file), 'rb') as f:  # Open the file
        flags = pickle.load(f)  # Use pickle to inflate the obj back to RAM
    return flags
44cc70f185645799fdfd81c8806f3d3f8585fef4
709,795
def pancakeSort(self, A):
    # This method finds the largest remaining number on each pass and flips
    # it into its correct position.
    """
    :type A: List[int]
    :rtype: List[int]
    """
    bucket = sorted(A)
    ans = []
    for k in range(len(A), 0, -1):
        i = A.index(bucket.pop()) + 1
        ans += [i, k]
        A = A[i:k][::-1] + A[:i] + A[k:]
        print(A)
    return ans
35d358c6631f5cc708232f67a3e55d685116dff8
709,797
def includeme(config):
    """
    Get the build Git repository directory and make it accessible to all
    requests generated via Cornice.
    """
    # Make DB connection accessible as a request property
    def _get_repos(request):
        _settings = request.registry.settings
        repo_dir = _settings['repo_basedir']
        return repo_dir

    config.add_request_method(_get_repos, 'repo_dir', reify=True)
f2d73eb01b616f79059f4001c7b3faad67f48cd2
709,798