content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def get_locks(gdb):
    """Generates a list of current locks in a gdb."""
    # TODO: change to `glob(os.path.join(gdb, "*.lock"))`
    locks = [f for f in os.listdir(gdb) if ".lock" in f]
    for lock in locks:
        try:
            # Try to open the lock file itself for writing; failure means it is held
            with open(os.path.join(gdb, lock), "w") as f:
                pass
        except IOError:
            yield lock
700
def test_encode_json_strings(tmpdir):
    """Ensure that JSON values are preserved between NLJ and CSV."""
    infile = str(tmpdir.mkdir('test-in').join('in.json'))
    outfile = str(tmpdir.mkdir('test-out').join('out.json'))
    roundtrip_file = str(tmpdir.mkdir('test-roundtrip').join('roundtrip.json'))

    # Write NLJ where a value is a dictionary to a file and convert to a CSV
    expected = {
        'field1': 'value',
        'field2': {'key': 'val'}
    }
    with nlj.open(infile, 'w') as dst:
        dst.write(expected)
    result = CliRunner().invoke(main, [
        'nlj2csv', infile, outfile
    ])
    assert result.exit_code == 0

    # Convert the CSV from the previous step back to NLJ
    result = CliRunner().invoke(main, [
        'csv2nlj', outfile, roundtrip_file
    ])
    assert result.exit_code == 0
    with nlj.open(roundtrip_file) as src:
        actual = next(src)

    # Compare JSON -> JSON
    assert expected == actual
701
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=()):
    """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Return an initialized network.
    """
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
    init_weights(net, init_type, init_gain=init_gain)
    return net
702
def remove_from_group(group_name, nodes=None, nodes_by_col='SUID', edges=None, edges_by_col='SUID', network=None, base_url=DEFAULT_BASE_URL): """Remove the specified nodes and edges from the specified group. Args: group_name (str): Specifies the name used to identify the group nodes (list or str or int or None): List of nodes or keyword: selected, unselected or all. If node list: ``list`` of node names or SUIDs, comma-separated string of node names or SUIDs, or scalar node name or SUID). Node names should be found in the ``SUID`` column of the ``node table`` unless specified in ``nodes_by_col``. If list is None, default is currently selected nodes. nodes_by_col (str): name of node table column corresponding to provided nodes list. Default is 'SUID'. edges (str or list or int or None): List of edges or keyword: selected, unselected or all. If edge list: ``list`` of edge names or SUIDs, comma-separated string of edge names or SUIDs, or scalar edge name or SUID). Edge names should be found in the ``SUID`` column of the ``edge table`` unless specified in ``edges_by_col``. If list is None, default is currently selected edges. edges_by_col (str): name of edge table column corresponding to provided edges list. Default is 'SUID'. network (SUID or str or None): Name or SUID of a network. Default is the "current" network active in Cytoscape. base_url (str): Ignore unless you need to specify a custom domain, port or version to connect to the CyREST API. Default is http://127.0.0.1:1234 and the latest version of the CyREST API supported by this version of py4cytoscape. Returns: dict: {} Raises: CyError: if network name or SUID doesn't exist requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error Examples: >>> remove_from_group('Group 1', ['GDS1', 'SIP4', 'PDC1'], nodes_by_col='COMMON') # remove nodes by common name & all their edges {} >>> remove_from_group('Group 1', 'GDS1, SIP4, PDC1', nodes_by_col='COMMON') # remove nodes by common name & all their edges {} >>> remove_from_group('Group 1', [76545, 75499, 80299]) # remove nodes by SUID & all their edges {} >>> remove_from_group('Group 1', 80299) # remove node by SUID & all its edges {} >>> remove_from_group('Group 1') # remove all selected nodes and edges {} >>> remove_from_group('Group 1', nodes=[], edges=[78565, 79565]) # remove edges but not any nodes {} >>> remove_from_group('Group 1', nodes='unselected', edges='unselected') # remove all unselected nodes and edges {} """ if isinstance(nodes, str) and nodes in {'all', 'selected', 'unselected'}: nodes_by_col = None node_list = prep_post_query_lists(nodes, nodes_by_col) if isinstance(edges, str) and edges in {'all', 'selected', 'unselected'}: edges_by_col = None edge_list = prep_post_query_lists(edges, edges_by_col) net_suid = networks.get_network_suid(network, base_url=base_url) res = commands.commands_post( f'group remove groupName="{group_name}" nodeList="{node_list}" edgeList="{edge_list}" network="SUID:{net_suid}"', base_url=base_url) return res
703
def filter_bank_2high(t, Nj, Nj_1, ac=2.0, bc=2.0):
    """
    computes the filter bank for control points N_j, Nj_1 given the variable t
    :param t: data points on the real line R arranged in numpy array
    :param Nj: control point, Nj > Nj_1, integer
    :param Nj_1: control point, Nj > Nj_1, integer
    :param ac: between (1, 2]. Default 2.0
    :param bc: bc < 2. Default 2.0
    :return: (ha, hb1, hb2) low-pass filter ha and high-pass filters hb1 and hb2 at t, all in numpy array format
    """
    # a_hat
    a_cR = (1 + Nj_1) / ac
    a_epsR = Nj_1 - a_cR
    a_cL = -a_cR
    a_epsL = a_epsR

    # b_hat_1
    b1_cL = a_cR
    b1_epsL = a_epsR
    b1_cR = (Nj_1 + Nj) / bc
    b1_epsR = Nj - b1_cR

    # b_hat_2
    b2_cL = b1_cR
    b2_epsL = b1_epsR
    b2_cR = 2 * Nj
    b2_epsR = 1

    # supp(ha) = [0, 1 / 4]
    ha = hmask(t, a_cL, a_epsL, a_cR, a_epsR)
    # supp(hb1) = [1 / 8, 1 / 2]
    hb1 = hmask(t, b1_cL, b1_epsL, b1_cR, b1_epsR)
    # supp(hb2) = [1 / 4, 1 / 2]
    hb2 = hmask(t, b2_cL, b2_epsL, b2_cR, b2_epsR)
    return ha, hb1, hb2
704
def to_undirected(graph, copy_node_feat=True, copy_edge_feat=False):
    """Convert a graph to an undirected graph.

    Args:
        graph (pgl.Graph): The input graph, should be in numpy format.
        copy_node_feat (bool): Whether to copy node feature in return graph. Default: True.
        copy_edge_feat (bool): [Alternate input] Whether to copy edge feature in return graph.

    Returns:
        g (pgl.Graph): Returns an undirected graph.
    """
    if graph.is_tensor():
        raise TypeError("The input graph should be numpy format.")

    inv_edges = np.zeros(graph.edges.shape)
    inv_edges[:, 0] = graph.edges[:, 1]
    inv_edges[:, 1] = graph.edges[:, 0]
    edges = np.vstack((graph.edges, inv_edges))
    edges = np.unique(edges, axis=0)
    g = pgl.graph.Graph(num_nodes=graph.num_nodes, edges=edges)

    if copy_node_feat:
        for k, v in graph._node_feat.items():
            g._node_feat[k] = v

    if copy_edge_feat:
        # TODO(daisiming): Support duplicate edge_feature.
        raise NotImplementedError(
            "The copy of edge feature is not implemented currently.")

    return g
705
def as_public():
    """Return requests session without authentication"""
    return BaseUrlSession()
706
def transform_real_2_sim(real_position):
    """
    Transforms a position from the 'real' coordinate system to the 'sim' coordinate system.
    :param real_position: dictionary with 'x', 'y' and 'z' keys to floating point values
    :return: position in sim space as dictionary with 'x', 'y' and 'z' keys to floating point values
    """
    real_pos = np.array([real_position["x"], real_position["y"], 1])
    sim_pos_np = np.dot(REAL_2_SIM_TRANSFORM, real_pos)
    sim_pos = {"x": sim_pos_np[0], "y": 0.9010001, "z": sim_pos_np[1]}
    return sim_pos
707
def generate_tgt_mask(sz):
    """Generate a square mask for the sequence. The masked positions
    are filled with float('-inf'). Unmasked positions are filled with
    float(0.0).

    This function is a slight modification of the version in the PyTorch
    repository.

    Parameters
    ----------
    sz : int
        The length of the target sequence.
    """
    mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
    mask = (
        mask.float()
        .masked_fill(mask == 0, float("-inf"))
        .masked_fill(mask == 1, float(0.0))
    )
    return mask
708
def SceneAddPipeline(builder, pipeline):
    """This method is deprecated. Please switch to AddPipeline."""
    return AddPipeline(builder, pipeline)
709
def SynthesizeUserId(email):
    """Return a synthetic user ID from an email address.

    Note that this is not the same user ID found in the production system.

    Args:
        email: An email address.

    Returns:
        A string userid derived from the email address.
    """
    user_id_digest = _MD5_FUNC(email.lower()).digest()
    user_id = '1' + ''.join(['%02d' % ord(x) for x in user_id_digest])[:20]
    return user_id
710
def store_to_file(file_name, series, col_name, replace=False):
    """Add series to file."""
    path = config.DATADIR
    filepath = os.path.join(path, file_name)
    df = pd.read_csv(filepath)
    if (col_name in df) and (not replace):
        return f'{col_name} already in {file_name}. Not added.'
    df[col_name] = series
    df.to_csv(filepath, index=False)
    return f'{col_name} added to {file_name}.'
711
def compute_jacobian(fn, x0: torch.Tensor, bs: int): """ Computes the Jacobian matrix of the given function at x0, using vector-Jacobian products """ input_shape = x0.shape assert len(input_shape) == 3 dim = x0.numel() eye = torch.eye(dim, dtype=x0.dtype, device=x0.device) # Forward pass x0rep = x0.detach()[None].repeat([bs] + [1] * len(input_shape)) # repeat along batch axis x0rep.requires_grad = True z0rep = fn(x0rep) zshape = z0rep.shape[1:] assert zshape.numel() == dim # Compute batches of rows of the Jacobian rows = [] for row_start in trange(0, dim, bs, desc='jacobian', leave=False): # Pre-pad with extra rows to ensure that batch size stays constant row_end = min(row_start + bs, dim) num_rows = row_end - row_start if num_rows != bs: assert num_rows < bs pre_pad_rows = bs - num_rows else: pre_pad_rows = 0 assert row_start - pre_pad_rows >= 0 # vector-Jacobian product with rows of an identity matrix g, = torch.autograd.grad( z0rep, x0rep, grad_outputs=eye[row_start - pre_pad_rows:row_end].reshape(row_end - row_start + pre_pad_rows, *zshape), retain_graph=True ) assert g.shape == x0rep.shape rows.append(g.view(g.shape[0], -1)[pre_pad_rows:, :]) jacobian = torch.cat(rows, dim=0) assert jacobian.shape == (dim, dim) return jacobian
712
def pixel_distance(A, B):
    """
    In 9th grade I sat in geometry class wondering "when the hell am I
    ever going to use this?"...today is that day.

    Return the distance between two pixels
    """
    (col_A, row_A) = A
    (col_B, row_B) = B
    return math.sqrt(math.pow(col_B - col_A, 2) + math.pow(row_B - row_A, 2))
713
def test_out_of_scope_passed_error(logfile):
    """
    If an error is thrown out of band, ensure there's no error data
    unless we pass a value to the `error` field, in which case stuff it
    in `error_message`.
    """
    configure_logging()
    log = get_logger()

    log.error("0 test", exc_info=sys.exc_info())
    log.error("1 test", exc_info=True)
    log.error("2 test", error="OVERRIDDEN MESSAGE")
    log.error("3 test", error=100.0)
    log.error("4 test", error=True)
    log.warn("5 test", include_exception=True)
    log.info("6 test", include_exception=True)
    log.exception("7 test")

    lines = logfile.readlines()
    for i, line in enumerate(lines):
        out = json.loads(line)
        assert out['event'] == "{} test".format(i)
        assert 'error_name' not in out
        assert 'error_traceback' not in out
        if i == 2:
            assert out['error_message'] == "OVERRIDDEN MESSAGE"
        elif i == 3:
            assert out['error_message'] == 100.0
        elif i == 4:
            assert out['error_message'] is True
        else:
            assert 'error_message' not in out
714
def word_ngrams(s, n=3, token_fn=tokens.on_whitespace):
    """
    Word-level n-grams in a string

    By default, whitespace is assumed to be a word boundary.

    >>> ng.word_ngrams('This is not a test!')
    [('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]

    If the sequence's length is less than or equal to n, the n-grams are
    simply the sequence itself.

    >>> ng.word_ngrams('Test!')
    [('Test!')]

    Args:
        s: a string

    Returns:
        list: tuples of word-level n-grams
    """
    tokens = token_fn(s)
    return __ngrams(tokens, n=min(len(tokens), n))
715
def phosites_detail(text):
    """
    create detail view output of phosphosites by accession.
    :param text: string of phos group ID
    :return: template
    """
    results = browse_queries.browse_detail(text, 'Phosphosite')
    table = browse_queries.phos_kin_query(text)

    # pass tables, results and style indicator to template for rendering, plus
    # variables for title info (related and text of acc no)
    return render_template('search_results.html', title="Phosphosite",
                           style='double', results=results, table=table,
                           related="Kinases", text=text)
716
def convert_to_np_arrays(X):
    """
    Converts the input arrays to dense numpy arrays to allow the methods to work properly
    """
    try:
        X = X.todense()
    except AttributeError:
        # X is not a sparse matrix; use it as-is
        pass
    X = np.array(X)
    if len(X.shape) > 2:
        X = reduce_shape(X)
    return X
717
def cleanup_handler(signal_received, frame): """Handle cleanup when exiting with Ctrl-C. Args: signal_received: The signal number received. frame: The current stack frame. """ global force_non_graceful_cleanup if not force_non_graceful_cleanup: print(f"{bcolor.FAIL}SIGINT or Ctrl-C detected. Exiting gracefully...{bcolor.ENDC}") if thread_manager.is_alive('Backup'): thread_manager.kill('Backup') if thread_manager.is_alive('Backup'): force_non_graceful_cleanup = True print(f"{bcolor.FAIL}Press Ctrl-C again to force stop{bcolor.ENDC}") while thread_manager.is_alive('Backup'): pass print(f"{bcolor.FAIL}Exiting...{bcolor.ENDC}") if thread_manager.is_alive('backupTimer'): thread_manager.kill('backupTimer') else: print(f"{bcolor.FAIL}SIGINT or Ctrl-C detected. Force closing...{bcolor.ENDC}") exit(0)
718
def main(wf): """Run the workflow. Args: wf (workflow.Workflow): Active Workflow object. """ from docopt import docopt # Parse command-line arguments and call appropriate # command function. args = docopt(__doc__, wf.args, version=wf.version) log.debug('args=%r', args) if args.get('list'): return do_list(wf, args) if args.get('open'): return do_open(wf, args) if args.get('update'): return do_update(wf, args)
719
def remove(c, containers=False, images=False):
    """
    Clean up
    """
    if containers:
        c.run("sudo docker rm $(docker ps -a -q)")
    if images:
        c.run("sudo docker rmi $(docker images -q)")
720
def run_sorting(): """ This is just a test function, to avoid run the GUI every time. """ import csv import itertools """ ##To run fibers/cells/fmd/dtd/... folders = ['/Users/romuere/Dropbox/CBIR/fibers/database/no_fibers/*','/Users/romuere/Dropbox/CBIR/fibers/database/yes_fibers/*'] fname_database = [] labels_database = np.empty(0) for id,f in enumerate(folders): files = glob.glob(f) labels_database = np.append(labels_database, np.zeros(len(files))+id) fname_database = fname_database+files print(files) print(len(fname_database)) preprocessing_method = 'log' feature_extraction_method = 'glcm' searching_method = 'lsh' retrieval_number = 10 similarity_metric = 'ed' path_output = '/Users/romuere/Dropbox/CBIR/fibers/results/' list_of_parameters = ['1','2'] path_cnn_trained = '' fname_retrieval = fname_database[0:3] + fname_database[2001:2003] labels_retrieval = np.concatenate((labels_database[0:3],labels_database[2001:2003])) """ ##To run scattering images path = '/Users/romuere/Desktop/als/kyager_data_raw' files_database_class0 = '/Users/romuere/Desktop/als/kyager_data_raw/SAXS.txt' files_database_class1 = '/Users/romuere/Desktop/als/kyager_data_raw/WAXS.txt' files_retrieval_class0 = '/Users/romuere/Desktop/als/kyager_data_raw/SAXS_query.txt' files_retrieval_class1 = '/Users/romuere/Desktop/als/kyager_data_raw/WAXS_query.txt' #------# reader = csv.reader(open(files_database_class0)) fname_database_class0 = list(reader) fname_database_class0 = list(itertools.chain(*fname_database_class0)) labels_class_0 = np.zeros(len(fname_database_class0)) reader = csv.reader(open(files_database_class1)) fname_database_class1 = list(reader) fname_database_class1 = list(itertools.chain(*fname_database_class1)) labels_class_1 = np.zeros(len(fname_database_class1))+1 fname_database = fname_database_class0+fname_database_class1 fname_database = [path+x for x in fname_database] labels_database = np.concatenate((labels_class_0,labels_class_1)) #------# reader = csv.reader(open(files_retrieval_class0)) fname_retrieval_class0 = list(reader) fname_retrieval_class0 = list(itertools.chain(*fname_retrieval_class0)) labels_retrieval_class0 = np.zeros(len(fname_retrieval_class0)) reader = csv.reader(open(files_retrieval_class1)) fname_retrieval_class1 = list(reader) fname_retrieval_class1 = list(itertools.chain(*fname_retrieval_class1)) labels_retrieval_class1 = np.zeros(len(fname_retrieval_class1)) fname_retrieval = fname_retrieval_class0+fname_retrieval_class1 fname_retrieval = [path+x for x in fname_retrieval] labels_retrieval = np.concatenate((labels_retrieval_class0,labels_retrieval_class1)) #------# preprocessing_method = 'log' feature_extraction_method = 'lbp' searching_method = 'lsh' retrieval_number = 10 similarity_metric = 'ed' path_output = '/Users/romuere/Desktop/als/output/' list_of_parameters = ['2']#['2','8','8'] path_cnn_trained = '' run.run_command_line(fname_database,labels_database,fname_retrieval,labels_retrieval,path_cnn_trained,path_output,feature_extraction_method,similarity_metric,retrieval_number,list_of_parameters,preprocessing_method,searching_method, isEvaluation = False)
721
def reduce_arr(arr):
    """
    Return which elements on which axis are unique

    Args:
        arr (np.ndarray) : input array which to reduce to unique value

    Returns:
        reduced array(np.ndarray) : array with reduced data.
        data_axis (list) : the axes that have changing data.
    """
    ndim = len(arr.shape)
    data_axis = []
    slice_array = ()
    for i in range(ndim):
        mn = np.min(arr, axis=i)
        mx = np.max(arr, axis=i)
        eq = np.all(mn == mx)
        if not eq:
            data_axis.append(ndim - i - 1)
            slice_array += (slice(None),)
        else:
            slice_array += (0,)
    red_ar = arr[slice_array]
    return red_ar, data_axis
722
def nll_lorentzian(preds, target, gamma):
    """
    Isotropic lorentzian loss function
    :param preds: prediction values from NN of size [batch, particles, timesteps, (x,y,v_x,v_y)]
    :param target: target data of size [batch, particles, timesteps, (x,y,v_x,v_y)]
    :param gamma: The tensor for the FWHM of the distribution of size [batch, particles, timesteps, (x,y,v_x,v_y)]
    :return: value of the loss function normalised by (batch * number of atoms)
    """
    gammasquared = gamma ** 2
    neg_log_p = torch.log(1 + ((preds - target) ** 2 / (gammasquared)))
    neg_log_p += torch.log(gamma)
    return neg_log_p.sum() / (target.size(0) * target.size(1))
723
def get_archive():
    """Ensure that the archive file exists and return its path.

    This is a function so the path can be made configurable in the future.

    Returns:
        :obj:`str`: The full local path to the archive file.
    """
    filename = '/config/archive.txt'
    archfile = Path(filename)
    if not archfile.exists():
        archfile.touch()
    return filename
724
def _filter_unique_configs(
    configs: Sequence[ProblemConfig],
    filter_fn: Callable[[ProblemConfig], bool] = lambda _: True,
) -> List[ProblemConfig]:  # pytype: disable=annotation-type-mismatch
    """Filters a list of problem_config to their unique occurrences for testing.

    Args:
        configs: list of ProblemConfig.
        filter_fn: optional function to apply only to subset meeting this condition.

    Returns:
        List of unique occurrences for testing.
    """
    observed_configs = set()
    new_configs = []
    for problem_config in configs:
        if filter_fn(problem_config):
            if problem_config not in observed_configs:
                new_configs.append(problem_config)
                observed_configs.add(problem_config)
    return new_configs
725
def convert_units(str):
    """ Convert some string with binary prefix to int bytes"""
    unit = ''.join(ele for ele in str if not ele.isdigit()).strip().lower()
    return int(''.join(ele for ele in str if ele.isdigit())) * {
        "b": 1,
        "B": 1,
        "k": 2**10,
        "kb": 2**10,
        "m": 2**20,
        "mb": 2**20,
        "g": 2**30,
        "gb": 2**30,
        "t": 2**40,
        "tb": 2**40,
    }.get(unit, 1)
726
def match_piecewise(candidates: set, symbol: str, sep: str='::') -> set:
    """
    Match the requested symbol reverse piecewise (split on ``::``) against the candidates.
    This allows you to under-specify the base namespace so that ``"MyClass"`` can match ``my_namespace::MyClass``

    Args:
        candidates: set of possible matches for symbol
        symbol: the symbol to match against
        sep: the separator between identifier elements

    Returns:
        set of matches
    """
    piecewise_list = set()
    for item in candidates:
        split_symbol = symbol.split(sep)
        split_item = item.split(sep)

        split_symbol.reverse()
        split_item.reverse()

        min_length = len(split_symbol)
        split_item = split_item[:min_length]

        if split_symbol == split_item:
            piecewise_list.add(item)

    return piecewise_list
727
def is_kube_version_supported(kube_version, min_version=None, max_version=None):
    """Check if the k8s version is supported by the application.

    :param kube_version: the running or target k8s version
    :param min_version (optional): minimum k8s version supported by the app
    :param max_version (optional): maximum k8s version supported by the app

    :returns bool: True if k8s version is supported
    """
    if ((min_version is not None and
         LooseVersion(kube_version) < LooseVersion(min_version)) or
            (max_version is not None and
             LooseVersion(kube_version) > LooseVersion(max_version))):
        return False
    return True
728
def chebi(name=None, identifier=None):
    """Build a ChEBI abundance node.

    :rtype: Abundance
    """
    return Abundance(namespace='CHEBI', name=name, identifier=identifier)
729
def plot_slice(sliceX, color, ax=None, s=100):
    """
    Plots slice spatial coordinates.

    param: sliceX - AnnData Object of slice
    param: color - scatterplot color
    param: ax - Pre-existing axes for the plot. Otherwise, call matplotlib.pyplot.gca() internally.
    param: s - size of spots
    """
    sns.scatterplot(x=sliceX.obsm['spatial'][:, 0], y=sliceX.obsm['spatial'][:, 1],
                    linewidth=0, s=s, marker=".", color=color, ax=ax)
    if ax:
        ax.invert_yaxis()
        ax.axis('off')
730
def get_group(request):
    """returns all the groups in database
    """
    group_id = request.matchdict.get('id', -1)
    group = Group.query.filter_by(id=group_id).first()
    return [
        {
            'id': group.id,
            'name': group.name,
            'thumbnail_full_path': group.thumbnail.full_path if group.thumbnail else None,
            'created_by_id': group.created_by.id,
            'created_by_name': group.created_by.name,
            'users_count': len(group.users),
        }
    ]
731
def create_histogram(path_to_image, target_path=''): """ creates a histogram of a given image and either shows or saves a plot Args: path_to_image: path to the image target_path: if given, saves a plot, otherwise (if empty) shows the plot Returns: the histogram plot """ image = cv2.imread(path_to_image) depth = image.shape[2] for z in range(depth): im = image[:, :, z] mi = im.min() ma = im.max() if mi < 0 or ma > 255: print("range error: min=" + str(mi) + " max=" + ma) exit() # V1 # plt.hist(im.ravel(), 256, [0, 256]) # V2 # calculate mean value from RGB channels and flatten to 1D array vals = im.flatten() # plot histogram with 255 bins # b, bins, patches = plt.hist(vals, 255, stacked=True, density=True) counts, bins = np.histogram(vals, 255) counts = (counts - min(counts)) / (max(counts) - min(counts)) plt.hist(bins[:-1], bins, weights=counts) plt.xlim([0, 255]) # plt.show() # plt.title(path_to_image) plt.xlabel('pixel value') plt.ylabel('count') if target_path == '': plt.show() else: plt.savefig(target_path + 'histo') plt.clf() return plt
732
def task_6_list_all_supplier_countries(cur) -> list:
    """
    List all supplier countries

    Args:
        cur: psycopg cursor

    Returns: 29 records
    """
    cur.execute("""SELECT country FROM suppliers""")
    return cur.fetchall()
733
def _energy_to_length_factor(e_unit, l_unit):
    """
    Convert the units of Planck's constant and speed of light

    :param e_unit:
    :type e_unit: str
    :param l_unit:
    :type l_unit: str
    :return: c, h
    """
    dest_h_u = ug.parse_units('%s s' % e_unit)
    dest_c_u = ug.parse_units('%s/s' % l_unit)

    if dest_h_u.dimensionality != _h_unit.dimensionality:
        raise ValueError("e_unit should be a valid energy unit")

    if dest_c_u.dimensionality != _c_unit.dimensionality:
        raise ValueError('l_unit should be a valid length unit')

    h = ug.convert(sc.h, _h_unit, dest_h_u)
    c = ug.convert(sc.c, _c_unit, dest_c_u)
    return c, h
734
def kev_to_wavelength(kev):
    """Calculate the wavelength from kev"""
    lamda = 12.3984 / kev  # keV to Angstrom
    return lamda
735
def test_vault_kv_poll_refetch(): """ Test the KV v2 token refetch operation """ with run_vault() as [vault_client, get_audit_events]: vault_client.write("secret/data/app", data={"env": "dev"}) with Agent.run( dedent( f""" intervalSeconds: 2 globalDimensions: env: {{"#from": "vault:secret/data/app[data.env]"}} configSources: vault: vaultToken: {vault_client.token} vaultAddr: {vault_client.url} kvV2PollInterval: 10s monitors: - type: collectd/uptime """ ) ) as agent: assert wait_for(p(has_datapoint, agent.fake_services, dimensions={"env": "dev"})) assert audit_read_paths(get_audit_events()) == ["secret/data/app"], "expected one read" vault_client.write("secret/data/app", data={"env": "prod"}) assert wait_for(p(has_datapoint, agent.fake_services, dimensions={"env": "prod"})) assert "secret/metadata/app" in audit_read_paths(get_audit_events())
736
def _energy_and_power_to_wave_vector( energy_cap, base_wave_vector_path, target_wave_vector_path): """Add captured wave energy value from energy_cap to a field in wave_vector. The values are set corresponding to the same I,J values which is the key of the dictionary and used as the unique identifier of the shape. Parameters: energy_cap (dict): a dictionary with keys (I,J), representing the wave energy capacity values. base_wave_vector_path (str): a path to a wave point shapefile with existing fields to copy from. target_wave_vector_path (str): a path to the wave point shapefile to write the new field/values to. Returns: None. """ _copy_vector_or_raster(base_wave_vector_path, target_wave_vector_path) target_wave_vector = gdal.OpenEx( target_wave_vector_path, gdal.OF_VECTOR | gdal.GA_Update) target_wave_layer = target_wave_vector.GetLayer() # Create the Captured Energy and Wave Power fields for the shapefile for field_name in [_CAP_WE_FIELD, _WAVE_POWER_FIELD]: field_defn = ogr.FieldDefn(field_name, ogr.OFTReal) field_defn.SetWidth(24) field_defn.SetPrecision(11) target_wave_layer.CreateField(field_defn) # For all of the features (points) in the shapefile, get the corresponding # point/value from the dictionary and set the _CAP_WE_FIELD field as # the value from the dictionary for feat in target_wave_layer: # Calculate and set the Captured Wave Energy field value_i = feat.GetField('I') value_j = feat.GetField('J') we_value = energy_cap[(value_i, value_j)] feat.SetField(_CAP_WE_FIELD, we_value) # Calculate and set the Wave Power field height = feat.GetFieldAsDouble(_HEIGHT_FIELD) # in meters period = feat.GetFieldAsDouble(_PERIOD_FIELD) depth = feat.GetFieldAsInteger(_DEPTH_FIELD) depth = numpy.absolute(depth) # wave frequency calculation (used to calculate wave number k) tem = (2.0 * math.pi) / (period * _ALFA) # wave number calculation (expressed as a function of # wave frequency and water depth) k = numpy.square(tem) / (_GRAV * numpy.sqrt( numpy.tanh((numpy.square(tem)) * (depth / _GRAV)))) # Setting numpy overflow error to ignore because when numpy.sinh # gets a really large number it pushes a warning, but Rich # and Doug have agreed it's nothing we need to worry about. numpy.seterr(over='ignore') # wave group velocity calculation (expressed as a # function of wave energy period and water depth) wave_group_velocity = (((1 + ( (2 * k * depth) / numpy.sinh(2 * k * depth))) * numpy.sqrt( (_GRAV / k) * numpy.tanh(k * depth))) / 2) # Reset the overflow error to print future warnings numpy.seterr(over='print') # Wave power calculation. Divide by 1000 to convert W/m to kW/m # Note: _SWD: Sea water density constant (kg/m^3), # _GRAV: Gravitational acceleration (m/s^2), # height: in m, wave_group_velocity: in m/s wave_pow = ((((_SWD * _GRAV) / 16) * (numpy.square(height)) * wave_group_velocity) / 1000) feat.SetField(_WAVE_POWER_FIELD, wave_pow) # Save the feature modifications to the layer. target_wave_layer.SetFeature(feat) feat = None target_wave_layer = None target_wave_vector = None
737
def _convert_rde_to_1_0_format(rde_data: dict) -> dict: """Convert defined entity to RDE 1.0. :param DefEntity rde_data: Defined entity dictionary :return: converted defined entity :rtype: dict """ new_rde = common_models.DefEntity(**rde_data) new_native_entity: AbstractNativeEntity = rde_utils.convert_runtime_rde_to_input_rde_version_format( # noqa: E501 new_rde.entity, rde_constants.RDEVersion.RDE_1_0_0) new_rde.entity = new_native_entity new_rde.entityType = common_models.EntityType.NATIVE_ENTITY_TYPE_1_0_0.value.get_id() # noqa: E501 return new_rde.to_dict()
738
def additive_symbols(tokens, base_url):
    """``additive-symbols`` descriptor validation."""
    results = []
    for part in split_on_comma(tokens):
        result = pad(remove_whitespace(part), base_url)
        if result is None:
            return
        if results and results[-1][0] <= result[0]:
            return
        results.append(result)
    return tuple(results)
739
def check_detection(frame, yx_exp, fwhm, snr_thresh, deltapix=3): """ Verify if injected companion is recovered. Parameters ---------- frame : 2d ndarray yx_exp : tuple(y, x) Expected position of the fake companion (= injected position). fwhm : int or float FWHM. snr_thresh : int or float, optional S/N threshold. deltapix : int or float, optional Error margin in pixels, between the expected position and the recovered. """ def verify_expcoord(vectory, vectorx, exp_yx): for coor in zip(vectory, vectorx): print(coor, exp_yx) if np.allclose(coor[0], exp_yx[0], atol=deltapix) and \ np.allclose(coor[1], exp_yx[1], atol=deltapix): return True return False table = vip.metrics.detection(frame, fwhm=fwhm, mode='lpeaks', bkg_sigma=5, matched_filter=False, mask=True, snr_thresh=snr_thresh, plot=False, debug=True, full_output=True, verbose=True) msg = "Injected companion not recovered" assert verify_expcoord(table.y, table.x, yx_exp), msg
740
def plot_decision_boundary(h, X, Y,step=0.1,x1_range=None,x2_range=None,title=""): """ Args: h(class:'function'): hypothesis (Model) X: input dataset (Also Used for determining ranges if xi_range=None) Y: output dataset (Shoud have only 1 and -1 as element values) step: step size to use for creating mesh-grid """ if x1_range is None and x2_range is None: x1r = (X[:,0].min(), X[:,0].max()) x2r = (X[:,1].min(), X[:,1].max()) elif (x1_range is not None) and (x2_range is not None): x1r = x1_range x2r = x2_range else: raise AssertionError("x1_range and x2_range should be either both None\ or non-None.") xx, yy = np.meshgrid(np.arange(x1r[0], x1r[1], step), np.arange(x2r[0], x2r[1], step)) f, ax = plt.subplots() Z = h.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.1) for i in range(X.shape[0]): c = "blue" if Y[i] == 1: c = "red" ax.scatter(X[i,0], X[i,1], c=c, alpha=0.5) plt.title(title) plt.show()
741
def _index_list(key_or_list, direction=None):
    """Helper to generate a list of (key, direction) pairs.

    Takes such a list, or a single key, or a single key and direction.
    """
    if direction is not None:
        return [(key_or_list, direction)]
    else:
        if isinstance(key_or_list, string_type):
            return [(key_or_list, ASCENDING)]
        elif not isinstance(key_or_list, (list, tuple)):
            raise TypeError("if no direction is specified, "
                            "key_or_list must be an instance of list")
        return key_or_list
742
def load_only_test(cfg): """Load and process test data only Args: cfg (dict): configuration file Returns: DataLoader: test DataLoader """ # Set test path path_to_test = os.path.join(cfg["DATA_DIR"], "test/") # Load the test set test_dataset = TestLoader(path_to_test) # DatasetTransformer data_transforms = apply_preprocessing(cfg=cfg["DATASET"]["PREPROCESSING"]) test_dataset = DatasetTransformer( test_dataset, transforms.Compose(data_transforms["test"]) ) # Dataloaders test_loader = DataLoader( dataset=test_dataset, batch_size=cfg["TEST"]["BATCH_SIZE"], shuffle=False, num_workers=cfg["DATASET"]["NUM_THREADS"], ) if cfg["DATASET"]["VERBOSITY"]: print( f"The test set contains {len(test_loader.dataset)} images," f" in {len(test_loader)} batches" ) return test_loader
743
def update_covid(update_name: str) -> None:
    """Updates the global webpage_covid_data list, this is in main.py and
    is what gets passed to the web page"""
    logging.info(f"Updating COVID data due to update '{update_name}'")
    global webpage_covid_data
    location = get_config_data()["local_location"]
    location_type = get_config_data()["local_location_type"]
    webpage_covid_data = covid_API_request(location, location_type)
744
def test_row_drop_bad_condition_in_dict():
    """Testing the RowDrop pipeline stage."""
    with pytest.raises(ValueError):
        RowDrop({'a': 'bad'})
745
def create_camvid_dataset(path_from, path_to, split_train=0.8): """ Reads each `.mat` file in the `path_from` dir and creates segmentation dataset in the `path_to` dir. Assumes that `path_from` contains only `.mat` files. :path_from: str or PosixPath to folder with `.mat` files :path_to: str or PosixPath to folder where to save segmentation dataset :split_train: proportion of `train` in whole dataset; proportion of `valid`: (1 - `split_train`) """ # check splitting probability if split_train < 0 or split_train > 1: raise ValueError("Wrong 'train'/'valid' split proportion, should be in range [0, 1].") # convert all inputs to PosixPath format path_from, path_to = convert_to_pathes(path_from, path_to) # create folders if needed for dirname in ["images", "labels"]: if not os.path.exists(path_to / dirname): os.mkdir(path_to / dirname) # convert `mat` files to `png` dataset of slices and masks # perutation is needed for further random splitting to "valid"/"test" datasets. fnames = get_all_mat_files(path_from, permutate=True) for fname in tqdm(fnames): mat2png(fname, path_to=(path_to / "images"), path_masks=(path_to / "labels")) # create file with segmentation codes: 0 - Healthy, 1 - tumor, 2 - Necrosis with open(path_to / "codes.txt", "w") as file: file.write("Healthy\nTumor\nNecrosis") # create file with filenames for `valid` dataset with open(path_to / "valid.txt", "w") as file: prefixes_valid = [el.split('/')[-1][:-4] for el in fnames[int(len(fnames) * 0.8):]] # split by `.mat` file, not by `.png` slices for name_png in os.listdir(path_to / "images"): if name_png[:-8] in prefixes_valid: file.write(name_png + '\n')
746
def CalculateOSNames(os_name, os_variants):
    """Calculates all the names an OS can be called, according to its variants.

    @type os_name: string
    @param os_name: base name of the os
    @type os_variants: list or None
    @param os_variants: list of supported variants
    @rtype: list
    @return: list of valid names
    """
    if os_variants:
        return ["%s+%s" % (os_name, v) for v in os_variants]
    else:
        return [os_name]
747
def generate_schema(schema_json, use_logical_types=False, custom_imports=None, avro_json_converter=None): """ Generate file containing concrete classes for RecordSchemas in given avro schema json :param str schema_json: JSON representing avro schema :param list[str] custom_imports: Add additional import modules :param str avro_json_converter: AvroJsonConverter type to use for default values :return Dict[str, str]: """ if avro_json_converter is None: avro_json_converter = 'avrojson.AvroJsonConverter' if '(' not in avro_json_converter: avro_json_converter += f'(use_logical_types={use_logical_types}, schema_types=__SCHEMA_TYPES)' custom_imports = custom_imports or [] names = schema.Names() make_avsc_object(json.loads(schema_json), names) names = [k for k in six.iteritems(names.names) if isinstance(k[1], (schema.RecordSchema, schema.EnumSchema))] names = sorted(names, key=lambda x: x[0]) main_out = StringIO() writer = TabbedWriter(main_out) write_preamble(writer, use_logical_types, custom_imports) write_schema_preamble(writer) write_get_schema(writer) write_populate_schemas(writer) current_namespace = tuple() for name, field_schema in names: # type: str, schema.Schema name = clean_fullname(name) namespace = tuple(name.split('.')[:-1]) if namespace != current_namespace: current_namespace = namespace if isinstance(field_schema, schema.RecordSchema): logger.debug(f'Writing schema: {clean_fullname(field_schema.fullname)}') write_schema_record(field_schema, writer, use_logical_types) elif isinstance(field_schema, schema.EnumSchema): logger.debug(f'Writing enum: {field_schema.fullname}', field_schema.fullname) write_enum(field_schema, writer) writer.set_tab(0) writer.write('\n__SCHEMA_TYPES = {') writer.tab() for name, field_schema in names: n = clean_fullname(field_schema.name) writer.write(f"\n'{n}': {n}Class,") writer.untab() writer.write('\n}\n\n') writer.write(f'_json_converter = {avro_json_converter}\n\n') value = main_out.getvalue() main_out.close() return value, [clean_fullname(name[0]) for name in names]
748
def c4x(c: Circuit, c0: int, c1: int, c2: int, c3: int, t: int) -> Circuit:
    """A macro of 4-controlled X gate"""
    return c.h[t].c4z(c0, c1, c2, c3, t).h[t]
749
def pad(data, pad_id):
    """ Pad all lists in data to the same length. """
    width = max(len(d) for d in data)
    return [d + [pad_id] * (width - len(d)) for d in data]
750
def draw_boxes_on_image(img, boxes, color='blue', thickness=1, box_format=None): """ Example: >>> from netharn import util >>> img = np.zeros((10, 10, 3), dtype=np.uint8) >>> color = 'blue' >>> thickness = 1 >>> boxes = util.Boxes([[1, 1, 8, 8]], 'tlbr') >>> img2 = draw_boxes_on_image(img, boxes, color, thickness) >>> # xdoc: +REQUIRES(--show) >>> from netharn.util import mplutil >>> mplutil.autompl() # xdoc: +SKIP >>> mplutil.figure(doclf=True, fnum=1) >>> mplutil.imshow(img2) """ from netharn import util if not isinstance(boxes, util.Boxes): if box_format is None: raise ValueError('specify box_format') boxes = util.Boxes(boxes, box_format) color = tuple(util.Color(color).as255('bgr')) tlbr = boxes.to_tlbr().data img2 = img.copy() for x1, y1, x2, y2 in tlbr: # pt1 = (int(round(x1)), int(round(y1))) # pt2 = (int(round(x2)), int(round(y2))) pt1 = (int(x1), int(y1)) pt2 = (int(x2), int(y2)) img2 = cv2.rectangle(img2, pt1, pt2, color, thickness=thickness) return img2
751
def test_initism(simulation):
    """ Test init_ism function. """
    simulation.init_ism()
752
def _select_random_features(feature_list, amount):
    """Selects a given amount of random features from the feature list"""
    set_size = len(feature_list) - 1
    random_features = []
    for i in range(amount):
        while True:
            random_feature = feature_list[randint(0, set_size)]
            if random_feature in random_features:
                continue
            else:
                random_features.append(random_feature)
                break
    return random_features
753
def matrixop_inp_matr():
    """
    Returns the matrix entered by the user from the keyboard.

    Returns
    -------
    a : [[float, float, ...], [float, float, ...], ...]
        The matrix entered by the user
    """
    while True:
        try:
            m = int(input('How many rows will the matrix have? '))
        except:
            print('That is not a number')
        else:
            if m > 0:
                break
            else:
                print('That is not a natural number')
    while True:
        try:
            n = int(input('How many columns will the matrix have? '))
        except:
            print('That is not a number')
        else:
            if n > 0:
                break
            else:
                print('That is not a natural number')
    print("Enter the matrix elements (filled in row by row)")
    a = []
    for i in range(m):
        a.append([])
        for j in range(n):
            while True:
                try:
                    print(f'Enter element a[{i+1}][{j+1}]')
                    elem = eval(input())
                except:
                    print('That is not a number')
                else:
                    break
            a[i].append(elem)
    return a
754
def gen_info(run_event_files): """Generate subject_info structure from a list of event files """ info = [] for i, event_files in enumerate(run_event_files): runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for event_file in event_files: _, name = os.path.split(event_file) if '.run' in name: name, _ = name.split('.run%03d'%(i+1)) elif '.txt' in name: name, _ = name.split('.txt') runinfo.conditions.append(name) event_info = np.atleast_2d(np.loadtxt(event_file)) runinfo.onsets.append(event_info[:, 0].tolist()) if event_info.shape[1] > 1: runinfo.durations.append(event_info[:, 1].tolist()) else: runinfo.durations.append([0]) if event_info.shape[1] > 2: runinfo.amplitudes.append(event_info[:, 2].tolist()) else: delattr(runinfo, 'amplitudes') info.append(runinfo) return info
755
def _SectionNameToSymbols(section_name, section_to_symbols_map):
    """Yields all symbols which could be referred to by section_name.

    If the section name is present in the map, the names in the map are
    returned. Otherwise, any clone annotations and prefixes are stripped from
    the section name and the remainder is returned.
    """
    if (not section_name or
        section_name == '.text' or
        section_name.endswith('*')):
        return  # Don't return anything for catch-all sections
    if section_name in section_to_symbols_map:
        for symbol in section_to_symbols_map[section_name]:
            yield symbol
    else:
        name = _StripPrefix(section_name)
        if name:
            yield name
756
def dropannotation(annotation_list):
    """
    Drop out the annotation contained in annotation_list
    """
    target = ""
    for c in annotation_list:
        if not c == "#":
            target += c
        else:
            return target
    return target
757
def TIF_to_jpg_all(path):
    """run TIF_to_jpg() on every TIF of a folder."""
    for fname in sorted(glob.glob(path + "/*.tif")):
        print(fname)
        TIF_to_jpg(fname)
758
def iter_folders(parent_dir, outdir, target):
    """iterates through subfolders"""
    for dir in os.scandir(parent_dir):
        if dir.is_dir():
            path = pathlib.Path(dir).absolute()
            ident = '.'.join(path.parts[len(path.parts) - 3:])
            i_path = path / 'TIF'
            if i_path.exists():
                sippath = pathlib.Path(outdir, ident + '.zip')
                build_asset(sippath, i_path, target, ident)
759
def test_collection_detail(app, client):
    """Test collection detail endpoint"""
    with app.app_context():
        response = client.get("/api/collections/1", content_type="text/json")
        data = json.loads(response.data)
        assert response.status_code == 200
        col1 = Collection.query.filter(Collection.name == "Collection 1")[0]
        assert data["name"] == col1.name
        assert data["value"] == col1.value
        assert type(data["history"]) == list
760
def do_associate_latest_edit(parser, token): """ AssociateLatestEdit """ try: tag, node = token.split_contents() except ValueError: raise template.TemplateSyntaxError, "%r tag requires one argument" % token.contents.split()[0] return AssociateLatestEdit(node)
761
def read_tics_output(): """Read all the TICS register values from all the txt files. Reading all the configurations from the current directory. We assume the file has a format `CHIPNAME_frequency.txt`. """ dir_path = os.path.dirname(os.path.realpath(__file__)) all_txt = glob.glob(os.path.join(dir_path, '*.txt')) for s in all_txt: chip, freq = s.lower().split('/')[-1].strip('.txt').split('_') config = eval('_{}Config'.format(chip)) with open(s, 'r') as f: lines = [l.rstrip("\n") for l in f] for i in lines: m = re.search('[\t]*(0x[0-9A-F]*)', i) config[float(freq)] += int(m.group(1), 16),
762
def sample_flips_without_replacement() -> None:
    """Samples the coin flips without replacement, printing out the results."""
    randomizer = ur.UniqueRandomizer()

    # Sample pairs of coin flips until all possible results have been sampled.
    while not randomizer.exhausted():
        sample = flip_two_weighted_coins(randomizer)
        log_probability = randomizer.mark_sequence_complete()
        print('Sample {} is {} with probability {:2.0f}%. '
              'In total, {:3.0f}% of the output space has been sampled.'.format(
                  randomizer.num_sequences_sampled(), sample,
                  math.exp(log_probability) * 100,
                  randomizer.fraction_sampled() * 100))
763
def ci_test(c, python=""):
    """
    Test suite for continuous integration testing.
    Installs with pip, tests with pytest and checks coverage with coverage.
    """
    python_version = "" if len(python) == 0 else f"-p {python}"
    c.run(f"nox --session tests_pip {python_version}")
764
def open_monitoring_db(dbhost, dbuser, dbpass, database): """ Open MySQL monitoring DB """ try: conn = MySQLdb.connect(host=dbhost, user=dbuser, passwd=dbpass, db=database) except MySQLdb.Error, err: print "Error %d: %s" % (err.args[0], err.args[1]) sys.exit(1) return conn
765
def matdiff(matrix1,matrix2,figsize=None,cmap=None): """ display the difference between two real matrices, alongside this plot this difference on a log- colour scale (if diff!=0) """ if not figsize: figsize = defaults['figsize'] if not cmap: cmap = defaults['cmap'] _matdiff = matrix1-matrix2 f, (ax1, ax2) = plt.subplots(1,2,figsize=(2*figsize[0],figsize[1])) imreal = ax1.imshow(_matdiff,interpolation='nearest',cmap=cmap) f.colorbar(imreal,ax=ax1) # trying to plot the log-scale diff will fail if the difference is zero everywhere if not np.all(_matdiff==np.zeros(_matdiff.shape)): imimag = ax2.imshow(np.log10(np.abs(_matdiff)),interpolation='nearest',cmap=cmap) f.colorbar(imimag,ax=ax2) return f
766
def _tokens_by_class_of(tokens):
    """Generates lookup table of tokens in each class."""
    out = defaultdict(set)
    for token, token_classes in tokens.items():
        for token_class in token_classes:
            out[token_class].add(token)
    return out
767
def test_fileformattoml_pass_with_substitutions(): """Relative path to file should succeed. Strictly speaking not a unit test. """ context = Context({ 'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4', 'k5': 'v5', 'fileFormatToml': {'in': './tests/testfiles/testsubst.toml', 'out': './tests/testfiles/out/outsubst.toml'}}) fileformat.run_step(context) assert context, "context shouldn't be None" assert len(context) == 6, "context should have 6 items" assert context['k1'] == 'v1' assert context['fileFormatToml'] == { 'in': './tests/testfiles/testsubst.toml', 'out': './tests/testfiles/out/outsubst.toml'} with open('./tests/testfiles/out/outsubst.toml') as outfile: outcontents = outfile.read() expected = """key1 = "v1value !£$% *" [key2_v2] abc = "v3 def v4" def = [ "l1", "l2 v5", "l3", ] k21 = "value" """ assert outcontents == expected # atrociously lazy test clean-up os.remove('./tests/testfiles/out/outsubst.toml')
768
def load_mnist_dataset(shape=(-1, 784), path='data'): """Load the original mnist. Automatically download MNIST dataset and return the training, validation and test set with 50000, 10000 and 10000 digit images respectively. Parameters ---------- shape : tuple The shape of digit images (the default is (-1, 784), alternatively (-1, 28, 28, 1)). path : str The path that the data is downloaded to. Returns ------- X_train, y_train, X_val, y_val, X_test, y_test: tuple Return splitted training/validation/test set respectively. Examples -------- >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1,784), path='datasets') >>> X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 28, 28, 1)) """ return _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/')
769
def handle_move_arm_to_position(arm_position_entry, mqtt_sender):
    """
    Tells the robot to move its Arm to the position in the given Entry box.
    The robot must have previously calibrated its Arm.
    :type arm_position_entry ttk.Entry
    :type mqtt_sender: com.MqttClient
    """
    print('move_arm_to_position')
    mqtt_sender.send_message('move_arm_to_position', [arm_position_entry.get()])
770
def main(*args, **kwargs): """Defines the behavior of the app if run with flags and/or other parameters """ # If no command line arguments or only '-gui' flag provided, run blank GUI if (len(args) == 0 and len(kwargs) == 0) or \ (len(args) == 1 and args[0] == "-gui" and len(kwargs) == 0): call_gui() # To load a bunch of regions from files listed in a text file provide flag '-gui' and the filename(s) elif "-gui" in args and "filenames" in kwargs: instruction_lines = [] loader_instructions = { "FP": [], "FT": [], "PE": [], "ES": [], "NC": [], "CO": [], "CROP": [], "CBG": [], "SBG": [] } if "sections" in kwargs: sections_to_load = [] sections_to_load += [sec.strip() for sec in kwargs["sections"].split(";")] if "filenames" in kwargs: for filename in kwargs["filenames"].split(';'): instruction_lines += parse_batch_file(filename.strip(), sections_to_load) else: for filename in kwargs["filenames"].split(';'): instruction_lines += parse_batch_file(filename.strip()) loaded_files = [] if len(instruction_lines) > 0: for line in instruction_lines: if line is not None: parts = [l.strip().split('=') for l in line.split(';')] fname = parts[0][1].strip() # The name of the file to load is the first parameter in the line # In case the file has already been put in the loading que, skip it if fname in loaded_files: continue else: loaded_files.append(fname) for part in parts: name, value = part[0].strip(), part[1].strip() if name in loader_instructions: if name == "NC" and value == '': value = "1" if name == "CROP" and value != '': value = [v.strip() for v in value.split(':')] if name == "CROP" and value == '': value = [0, 0] # To emphasize that no cropping shall be done loader_instructions[name].append(value) if len(loader_instructions["FP"]) > 0: call_gui("-batchload", **loader_instructions) else: print("No source files were loaded. Specqp process is terminated.") sys.exit()
771
def entropy_of_states(probabilities, output_path, n_clusters):
    """
    Computes the entropy of probabilities of states

    :param probabilities: array with states probabilities
    :type probabilities: np.ndarray
    :param output_path: path to output directory
    :type output_path: str
    :param n_clusters: number of clusters
    :type: n_clusters: int
    :return: entropy: calculated entropy
    :rtype: int
    """
    logging.basicConfig(
        filename=os.path.join(output_path, 'entropy_n_clusters_{}.log'.format(
            n_clusters)),
        level=logging.INFO)
    entropy = scipy.stats.entropy(probabilities)
    logging.info('State {} entropy is {}'.format(n_clusters, entropy))
    dict = {'State': n_clusters, 'entropy': entropy}
    with open(os.path.join(output_path, 'entropy.json'), 'w') as fp:
        json.dump(dict, fp)
    return entropy
772
def test_pv_creation(client, core_api): # NOQA """ Test creating PV using Longhorn API 1. Create volume 2. Create PV for the volume 3. Try to create another PV for the same volume. It should fail. 4. Check Kubernetes Status for the volume since PV is created. """ volume_name = "test-pv-creation" # NOQA client.create_volume(name=volume_name, size=SIZE, numberOfReplicas=2) volume = wait_for_volume_detached(client, volume_name) pv_name = "pv-" + volume_name create_pv_for_volume(client, core_api, volume, pv_name) # try to create one more pv for the volume pv_name_2 = "pv2-" + volume_name with pytest.raises(Exception) as e: volume.pvCreate(pvName=pv_name_2) assert "already exist" in str(e.value) ks = { 'pvName': pv_name, 'pvStatus': 'Available', 'namespace': '', 'pvcName': '', 'lastPVCRefAt': '', 'lastPodRefAt': '', } wait_volume_kubernetes_status(client, volume_name, ks) delete_and_wait_pv(core_api, pv_name)
773
def EntryToSlaveName(entry):
    """Produces slave name from the slaves config dict."""
    name = entry.get('slavename') or entry.get('hostname')
    if 'subdir' in entry:
        return '%s#%s' % (name, entry['subdir'])
    return name
774
def create_app(config_class=Config): """ Constructs a Flask application instance. Parameters ---------- config_class: class that stores the configuration variables. Returns ------- app : Flask application """ app = Flask(__name__) app.config.from_object(config_class) bootstrap.init_app(app) from app.main import bp as main_bp app.register_blueprint(main_bp) from app.errors import bp as errors_bp app.register_blueprint(errors_bp) from app.api import bp as api_bp app.register_blueprint(api_bp, url_prefix='/api') if not os.path.exists('logs'): os.mkdir('logs') file_handler = RotatingFileHandler('logs/oslo-bysykkel-monitor.log', maxBytes=10240, backupCount=10) file_handler.setFormatter(logging.Formatter( '%(asctime)s %(levelname)s: %(message)s ' '[in %(pathname)s:%(lineno)d]')) file_handler.setLevel(logging.INFO) app.logger.addHandler(file_handler) app.logger.setLevel(logging.INFO) app.logger.info('Oslo Bysykkel Monitor startup') return app
775
def is_symmetric_re(root: TreeNode) -> bool:
    """Check if a binary tree is a mirror of itself (symmetric around its center)."""
    if not root:
        return False

    def is_mirror(t1, t2):
        if not t1 and not t2:
            return True
        if not t1 or not t2:
            return False
        return t1.val == t2.val and is_mirror(t1.left, t2.right) and is_mirror(t1.right, t2.left)

    return is_mirror(root, root)
776
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up paperless from a config entry."""
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
777
def azimuthal_average(image, center=None, stddev=True, binsize=0.5, interpnan=False):
    """Modified based on https://github.com/keflavich/image_tools/blob/master/image_tools/radialprofile.py

    Calculate the azimuthally averaged radial profile.

    Parameters:
        image (numpy ndarray): 2-D image
        center (list): [x, y] pixel coordinates. If None, use image center.
            Note that x is horizontal and y is vertical, y, x = image.shape.
        stdev (bool): if True, the stdev of profile will also be returned.
        binsize (float): size of the averaging bin. Can lead to strange results if
            non-binsize factors are used to specify the center and the binsize is too large.
        interpnan (bool): Interpolate over NAN values, i.e. bins where there is no data?

    Returns:
        If `stdev == True`, it will return [radius, profile, stdev];
        else, it will return [radius, profile].
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)
    if center is None:
        center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])

    r = np.hypot(x - center[0], y - center[1])

    # The 'bins' as initially defined are lower/upper bounds for each bin
    # so that values will be in [lower,upper)
    nbins = int(np.round(r.max() / binsize) + 1)
    maxbin = nbins * binsize
    bins = np.linspace(0, maxbin, nbins + 1)
    # We're probably more interested in the bin centers than their left or right sides...
    bin_centers = (bins[1:] + bins[:-1]) / 2.0

    # There are never any in bin 0, because the lowest index returned by digitize is 1
    nr = np.histogram(r, bins)[0]  # nr is how many pixels are within each bin

    # Radial profile itself
    profile = np.histogram(r, bins, weights=image)[0] / nr

    if interpnan:
        profile = np.interp(bin_centers, bin_centers[~np.isnan(profile)],
                            profile[~np.isnan(profile)])

    if stddev:
        # Find out which radial bin each point in the map belongs to
        # recall that bins are from 1 to nbins
        whichbin = np.digitize(r.ravel(), bins)
        profile_std = np.array([image.ravel()[whichbin == b].std() for b in range(1, nbins + 1)])
        profile_std /= np.sqrt(nr)  # standard error of the mean
        return [bin_centers, profile, profile_std]
    else:
        return [bin_centers, profile]
778
def assign_probe_int(probe: SimHandleBase, val: int):
    """Assign int val to int var. Use for debug to display python int in waveforms"""
    if not isinstance(probe, SimHandleBase):
        return
    probe.value = val
779
def find_assign(data, varname):
    """Finds a substring that looks like an assignment.

    :param data: Source to search in.
    :param varname: Name of the variable for which an assignment should be found.
    """
    ASSIGN_RE = re.compile(BASE_ASSIGN_PATTERN.format(varname))
    if len(ASSIGN_RE.findall(data)) > 1:
        raise PluginError('Found multiple {}-strings.'.format(varname))
    if len(ASSIGN_RE.findall(data)) < 1:
        raise PluginError('No version assignment ("{}") found.'.format(varname))
    return ASSIGN_RE.search(data).group(2)
780
def test_uninstalled_non_existing(kb):
    """
    test wusa.uninstalled when the kb is not installed
    """
    mock_installed = MagicMock(return_value=False)
    with patch.dict(wusa.__salt__, {"wusa.is_installed": mock_installed}):
        returned = wusa.uninstalled(name=kb)
        expected = {
            "changes": {},
            "comment": "{} already uninstalled".format(kb),
            "name": kb,
            "result": True,
        }
        assert expected == returned
781
def remove(token: str, server: str="http://localhost:8080/remove", params: dict=None) -> int:
    """
    Removes the data associated with the token.

    :param token: the token to download the data for
    :type token: str
    :param server: the URL of the server to upload to
    :type server: str
    :param params: the additional parameters to send to the server, eg login information (user/password)
    :type params: dict
    :return: the status code, None if failed to download
    :rtype: int
    """
    if params is None:
        files = {}
    else:
        files = params.copy()
    files['token'] = token
    r = requests.post(server, files=files)
    return r.status_code
782
def get_pending_surveys_batch_number(batch_no):
    """
    Gets batch number for the shared survey

    :param batch_no: Shared survey batch number
    :type batch_no: str
    :raises ApiError: Raised when party returns api error
    :return: list share surveys
    """
    bound_logger = logger.bind(batch_no=batch_no)
    bound_logger.info("Attempting to retrieve share surveys by batch number")
    url = f"{app.config['PARTY_URL']}/party-api/v1/pending-surveys/{batch_no}"
    response = requests.get(url, auth=app.config["BASIC_AUTH"])
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        bound_logger.error("Failed to retrieve share surveys by batch number")
        raise ApiError(logger, response)
    bound_logger.info("Successfully retrieved share surveys by batch number")
    return response
783
def before_after_to_box(element, pseudo_type, state, style_for, get_image_from_uri, target_collector):
    """Return the boxes for ::before or ::after pseudo-element."""
    style = style_for(element, pseudo_type)
    if pseudo_type and style is None:
        # Pseudo-elements with no style at all do not get a style dict.
        # Their initial content property computes to 'none'.
        return []
    # TODO: should be the computed value. When does the used value for
    # `display` differ from the computed value? It's at least wrong for
    # `content` where 'normal' computes as 'inhibit' for pseudo elements.
    display = style['display']
    content = style['content']
    if 'none' in (display, content) or content in ('normal', 'inhibit'):
        return []
    box = make_box('%s::%s' % (element.tag, pseudo_type), style, [])
    quote_depth, counter_values, _counter_scopes = state
    update_counters(state, style)
    children = []
    outside_markers = []
    if display == 'list-item':
        marker_boxes = marker_to_box(
            element, state, style, style_for, get_image_from_uri,
            target_collector)
        if marker_boxes:
            if style['list_style_position'] == 'outside':
                outside_markers.extend(marker_boxes)
            else:
                children.extend(marker_boxes)
    children.extend(content_to_boxes(
        style, box, quote_depth, counter_values, get_image_from_uri,
        target_collector))
    box.children = children
    return outside_markers + [box]
784
def get_beads_MDA_atomgroups(ns):
    """For each CG bead, create the atom group used for geometry calculations
    along the trajectory, together with the per-atom weights (mapping weight
    times atom mass for COM mapping, uniform weights for COG mapping).

    ns requires:
        mapping_type
        atom_w
        aa_universe

    ns creates:
        mda_beads_atom_grps
        mda_weights_atom_grps
    """
    ns.mda_beads_atom_grps, ns.mda_weights_atom_grps = dict(), dict()
    for bead_id in ns.atom_w:
        try:
            # print("Created bead_id", bead_id, "using atoms", [atom_id for atom_id in ns.atom_w[bead_id]])
            if ns.mapping_type == "COM":
                ns.mda_beads_atom_grps[bead_id] = mda.AtomGroup([atom_id for atom_id in ns.atom_w[bead_id]],
                                                                ns.aa_universe)
                ns.mda_weights_atom_grps[bead_id] = np.array(
                    [ns.atom_w[bead_id][atom_id] * ns.aa_universe.atoms[atom_id].mass for atom_id in ns.atom_w[bead_id]])
            elif ns.mapping_type == "COG":
                ns.mda_beads_atom_grps[bead_id] = mda.AtomGroup([atom_id for atom_id in ns.atom_w[bead_id]],
                                                                ns.aa_universe)
                ns.mda_weights_atom_grps[bead_id] = np.array([1 for _ in ns.atom_w[bead_id]])
        except IndexError as e:
            msg = (
                f"An ID present in your mapping (NDX) file could not be found in the AA trajectory. "
                f"Please check your mapping (NDX) file.\nSee the error below to understand which "
                f"ID (here 0-indexed) could not be found:\n\n{str(e)}"
            )
            raise exceptions.MissformattedFile(msg)
785
def test_remove_one_child_left(test_bsts):
    """Test delete node one child, left."""
    test_bsts[4].delete(3)
    assert not test_bsts[4].contains(3)
    assert test_bsts[4].size() == 3
786
def pool_delete(transport, request, pool_name): """Deletes the pool `pool_name` :param transport: Transport instance to use :type transport: `transport.base.Transport` :param request: Request instance ready to be sent. :type request: `transport.request.Request` :param pool_name: Pool reference name. :type pool_name: `six.text_type` """ request.operation = 'pool_delete' request.params['pool_name'] = pool_name transport.send(request)
787
def make_sign_initializer(random_sign_init):
  """Random sign initializer for HyperBatchEnsemble layers."""
  if random_sign_init > 0:
    return ed.initializers.RandomSign(random_sign_init)
  else:
    # Non-positive values encode a Gaussian init around 1.0 whose stddev is
    # the negated argument.
    return tf.keras.initializers.RandomNormal(
        mean=1.0, stddev=-random_sign_init)
788
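# Usage sketch for make_sign_initializer above (not from the source): plug the
# returned initializer into a Keras layer. A positive argument yields edward2's
# RandomSign initializer; a non-positive argument falls back to a Gaussian
# around 1.0. Assumes tensorflow and edward2 are installed.
import tensorflow as tf

fast_weight_init = make_sign_initializer(0.75)
layer = tf.keras.layers.Dense(64, kernel_initializer=fast_weight_init)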
def _parallel_predict_proba(ensemble, X, idx, results): """ Compute predictions of SCM estimators """ for k in idx: res = ensemble.estimators[k].predict(X[:, ensemble.estim_features[k]]) results = results + res return results
789
def compute_FP_TP_Probs(Ycorr, Xcorr, Probs, is_tumor, evaluation_mask, Isolated_Tumor_Cells, level):
    """Generates true positive and false positive stats for the analyzed image

    Args:
        Probs: list of the Probabilities of the detected lesions
        Xcorr: list of X-coordinates of the lesions
        Ycorr: list of Y-coordinates of the lesions
        is_tumor: A boolean variable which is True when the case contains tumor
        evaluation_mask: The evaluation mask
        Isolated_Tumor_Cells: list of labels containing Isolated Tumor Cells
        level: The level at which the evaluation mask was made

    Returns:
        FP_probs: A list containing the probabilities of the false positive detections
        TP_probs: A list containing the probabilities of the True positive detections
        NumberOfTumors: Number of Tumors in the image (excluding Isolated Tumor Cells)
        detection_summary: A python dictionary object with keys that are the labels
            of the lesions that should be detected (non-ITC tumors) and values
            that contain detection details [confidence score, X-coordinate, Y-coordinate].
            Lesions that are missed by the algorithm have an empty value.
        FP_summary: A python dictionary object with keys that represent the false
            positive finding number and values that contain detection details
            [confidence score, X-coordinate, Y-coordinate].
    """
    max_label = np.amax(evaluation_mask)
    FP_probs = []
    TP_probs = np.zeros((max_label,), dtype=np.float32)
    detection_summary = {}
    FP_summary = {}
    for i in range(1, max_label + 1):
        if i not in Isolated_Tumor_Cells:
            label = 'Label ' + str(i)
            detection_summary[label] = []
    FP_counter = 0
    if (is_tumor):
        for i in range(0, len(Xcorr)):
            # note: evaluation_mask is indexed as [row, column] = [y, x], and the
            # detection coordinates are cast to int after downscaling by 2**level
            # to match the resolution of the mask
            HittedLabel = evaluation_mask[int(
                Ycorr[i]/pow(2, level)), int(Xcorr[i]/pow(2, level))]
            if HittedLabel == 0:
                FP_probs.append(Probs[i])
                key = 'FP ' + str(FP_counter)
                FP_summary[key] = [Probs[i], Xcorr[i], Ycorr[i]]
                FP_counter += 1
            elif HittedLabel not in Isolated_Tumor_Cells:
                if (Probs[i] > TP_probs[HittedLabel - 1]):
                    label = 'Label ' + str(HittedLabel)
                    detection_summary[label] = [Probs[i], Xcorr[i], Ycorr[i]]
                    TP_probs[HittedLabel - 1] = Probs[i]
    else:
        for i in range(0, len(Xcorr)):
            FP_probs.append(Probs[i])
            key = 'FP ' + str(FP_counter)
            FP_summary[key] = [Probs[i], Xcorr[i], Ycorr[i]]
            FP_counter += 1
    num_of_tumors = max_label - len(Isolated_Tumor_Cells)
    # just for diagnostics
    print('number of isolated tumor cells =', len(Isolated_Tumor_Cells))
    return FP_probs, TP_probs, num_of_tumors, detection_summary, FP_summary
790
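# Usage sketch for compute_FP_TP_Probs above (not from the source): a tiny
# synthetic evaluation mask with one lesion (label 1), one detection that hits
# it and one that misses. level=0 means the detection coordinates are already
# at mask resolution.
import numpy as np

evaluation_mask = np.zeros((8, 8), dtype=np.int64)
evaluation_mask[2:4, 2:4] = 1                  # one tumor region, label 1

Xcorr = [2, 6]                                 # x-coordinates of detections
Ycorr = [2, 6]                                 # y-coordinates of detections
Probs = [0.9, 0.4]

FP_probs, TP_probs, n_tumors, det_summary, FP_summary = compute_FP_TP_Probs(
    Ycorr, Xcorr, Probs, is_tumor=True, evaluation_mask=evaluation_mask,
    Isolated_Tumor_Cells=[], level=0)
print(FP_probs, TP_probs, n_tumors)            # -> [0.4] [0.9] 1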
def main(token, language, account): """ Get the account balances on Revolut """ if token is None: print("You don't seem to have a Revolut token") answer = input("Would you like to generate a token [yes/no]? ") selection(answer) while token is None: try: token = get_token() except Exception as e: login_error_handler(e) rev = Revolut(device_id=_CLI_DEVICE_ID, token=token) account_balances = rev.get_account_balances() if account: print(account_balances.get_account_by_name(account).balance) else: print(account_balances.csv(lang=language))
791
def split_sushi_data(K):
    """
    Needs to be run once on raw sushi data before starting sushi experiments
    Splits edges in set b of sushi data with cross validation
    Makes sure no node is shared in train and test sets
    Saves splits and scores
    """
    print('Reading sushi data...')
    home_path = str(Path.home())
    features, _, _, edges, scores = \
        read_sushi_data(home_path + '/Data/sushi3-2016/')
    print('Splitting edges per fold...')
    splits = cv_edges(edges, K)
    for i, split in enumerate(splits):
        print('For split %i, get stats_u, train_u...' % i)
        train_e, test_e = split
        print('Train edge count before stats/train split: %i' % len(train_e))
        train_u = get_unq_nodes(train_e)
        N = len(train_u)//2
        stats_u = train_u[N:]
        train_u = train_u[:N]
        # Keep only the edges whose endpoints both remain in the train node set.
        # Building a new list (instead of calling remove() while iterating over
        # train_e) avoids silently skipping elements.
        train_u_set = set(train_u)
        train_e = [edge for edge in train_e
                   if edge[0] in train_u_set and edge[1] in train_u_set]
        test_u = get_unq_nodes(test_e)
        print('Train edge count after split: %i' % len(train_e))
        with open(home_path + '/Data/sushi3-2016/split%i' % i, 'wb+') as f:
            pickle.dump([stats_u, train_u, train_e, test_u, test_e], f)
    with open(home_path + '/Data/sushi3-2016/features', 'wb+') as f:
        pickle.dump(features, f)
    with open(home_path + '/Data/sushi3-2016/scores', 'wb+') as f:
        pickle.dump(scores, f)
792
def make_sentences(text, src): """ Builds a list of dictionaries, one for each sentence resulting from the sentence parser. The dictionary schema is {"src": src, "label": 0, "sentence": sent} Substitutions are made for the identified tokens. Args: text (str): text to process src (str): identifier (file name) to include in the output Returns: List[Dict] """ no_sec = True text = text.replace(USC_DOT, USC) text = text.replace(PL, PL_SPACE) text = text.replace(EO, EO_SPACE) sents = [scrubber(sent, no_sec=no_sec) for sent in sent_tokenize(text)] sent_list = list() for sent in sents: if not sent: continue sent_list.append({"src": src, "label": 0, "sentence": sent}) return sent_list
793
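# Usage sketch for make_sentences above (not from the source): the function
# relies on module-level helpers (scrubber, sent_tokenize and the USC/PL/EO
# substitution constants), so this only illustrates the call shape and the
# output schema.
text = "The bill amends 42 U.S.C. 1395. It also cites Public Law 111-148."
for record in make_sentences(text, src="hr1234.txt"):
    print(record)   # {'src': 'hr1234.txt', 'label': 0, 'sentence': '...'}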
def read_test_case(file_path):
    """
    reads one test case from file.
    returns contents of test case
    Parameters
    ----------
    file_path : str
        the path of the test case file to read.
    Returns
    -------
    list
        a list of contents of the test case.
    """
    with open(file_path, "r") as file:
        number = int(file.readline().strip())
        case = list()
        for _ in range(number):
            case.append(file.readline().strip())
    return case
794
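# Usage sketch for read_test_case above (not from the source): write a small
# test-case file in the expected format (first line = number of entries, then
# one entry per line) and read it back.
from pathlib import Path

Path("case01.txt").write_text("3\nalpha\nbeta\ngamma\n")
print(read_test_case("case01.txt"))   # -> ['alpha', 'beta', 'gamma']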
def get_order_cart_product_options_output(cart_id: Optional[pulumi.Input[str]] = None, catalog_name: Optional[pulumi.Input[Optional[str]]] = None, plan_code: Optional[pulumi.Input[str]] = None, product: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetOrderCartProductOptionsResult]: """ Use this data source to access information about an existing resource. """ ...
795
def add_checkbox_column(col_list, row_list, checkbox_pos=1):
    """Insert a checkbox column into the column definitions and add the
        corresponding checkbox value to every row.

        col_list - a list of dictionaries that defines the column structure for
            the table (required). The order of the columns from left to right is
            depicted by the index of the column dictionary in the list. Each
            dictionary in the list has the following keys and values:
                'name' - a string for the column name (required)
                'total' - a boolean for whether the column should be totaled (required)

        row_list - a list of dictionaries that represent the rows. Each
            dictionary's keys should match the column names found in 'col_list'
            (required) Example: [{col_name_1: value, col_name_2: value, ...},
                                 {col_name_1: value, col_name_2: value, ...},
                                 ...]

        checkbox_pos - an integer for the position of the checkbox column.
            Defaulted at 1 (optional)

        returns - a tuple of the updated column and row lists of dictionaries,
            in that order"""

    LOGGER.debug('Adding a checkbox column to the column structure')
    # Insert a new column dictionary in the list in the second spot
    col_list.insert(checkbox_pos, {'name':'Select', 'total':False, 'attr':{'class':'checkbox'}, 'td_class':'checkbox'})
    # For each dictionary in the row list add a 'Select' key which
    # refers to the new column and set the value as a checkbox
    for val in row_list:
        val['Select'] = '<input type=checkbox name=cb value=1>'
    # Return a tuple of the updated / modified column and row list of
    # dictionaries
    return (col_list, row_list)
796
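# Usage sketch for add_checkbox_column above (not from the source): add a
# checkbox column to a two-column table definition. Assumes the module-level
# LOGGER is configured.
cols = [{'name': 'Parcel', 'total': False},
        {'name': 'Area', 'total': True}]
rows = [{'Parcel': 'A-1', 'Area': 12.5},
        {'Parcel': 'B-2', 'Area': 7.3}]

cols, rows = add_checkbox_column(cols, rows)
print([c['name'] for c in cols])   # -> ['Parcel', 'Select', 'Area']
print(rows[0]['Select'])           # -> '<input type=checkbox name=cb value=1>'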
def save_make_lines(filename, make_lines): """ Save entries of make_lines as lines in filename """ with open(filename, mode="w") as f: for line in make_lines: f.write(line+"\n")
797
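# Usage sketch for save_make_lines above (not from the source): write a handful
# of make variable definitions to a file, one per line.
save_make_lines("config.mk", ["CC = gcc", "CFLAGS = -O2 -Wall", "LDFLAGS ="])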
def accuracy4batch(model, testloader, criterion):
    """Compute a model's accuracy and accumulated loss over a test set
    INPUT:
        model: pytorch nn model.
        testloader: DataLoader. test data set
        criterion: criterion. loss criterion
    OUTPUT:
        accuracy: float in [0:1]. proportion of correct classifications in testloader
        test_loss: float. accumulated loss over the test set
    """
    test_loss = 0
    accuracy = 0
    model.eval()
    with torch.no_grad():
        for inputs, labels in testloader:
            inputs, labels = inputs.to(model.device), labels.to(model.device)
            logps = model.forward(inputs)
            batch_loss = criterion(logps, labels)
            test_loss += batch_loss.item()
            # Calculate accuracy
            ps = torch.exp(logps)
            top_p, top_class = ps.topk(1, dim=1)
            equals = top_class == labels.view(*top_class.shape)
            accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
    accuracy = accuracy/len(testloader)
    return accuracy, test_loss
798
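# Usage sketch for accuracy4batch above (not from the source): `model` and
# `testloader` are assumed to exist, the model is expected to output
# log-probabilities (e.g. a final LogSoftmax paired with NLLLoss), and it must
# carry the `device` attribute that accuracy4batch reads.
import torch
from torch import nn

criterion = nn.NLLLoss()
model.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(model.device)

accuracy, test_loss = accuracy4batch(model, testloader, criterion)
print("accuracy: %.3f, summed loss: %.3f" % (accuracy, test_loss))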
def AmendmentLinks(amendment, users_by_id, project_name):
  """Produce a list of value/url pairs for an Amendment PB.

  Args:
    amendment: Amendment PB to display.
    users_by_id: dict {user_id: user_view, ...} including all users
        mentioned in amendment.
    project_name: Name of project the issue/comment/amendment is in.

  Returns:
    A list of dicts with 'value' and 'url' keys. 'url' may be None.
  """
  # Display both old and new summary
  if amendment.field == tracker_pb2.FieldID.SUMMARY:
    result = amendment.newvalue
    if amendment.oldvalue:
      result += ' (was: %s)' % amendment.oldvalue
    return [{'value': result, 'url': None}]
  # Display new owner only
  elif amendment.field == tracker_pb2.FieldID.OWNER:
    if amendment.added_user_ids and amendment.added_user_ids[0] > 0:
      uid = amendment.added_user_ids[0]
      return [{'value': users_by_id[uid].display_name, 'url': None}]
    else:
      return [{'value': framework_constants.NO_USER_NAME, 'url': None}]
  elif amendment.field in (tracker_pb2.FieldID.BLOCKEDON,
                           tracker_pb2.FieldID.BLOCKING,
                           tracker_pb2.FieldID.MERGEDINTO):
    values = amendment.newvalue.split()
    bug_refs = [_SafeParseIssueRef(v.strip()) for v in values]
    issue_urls = [FormatIssueURL(ref, default_project_name=project_name)
                  for ref in bug_refs]
    # TODO(jrobbins): Permission checks on referenced issues to allow
    # showing summary on hover.
    return [{'value': v, 'url': u} for (v, u) in zip(values, issue_urls)]
  elif amendment.newvalue:
    # Catchall for everything except user-valued fields.
    return [{'value': v, 'url': None} for v in amendment.newvalue.split()]
  else:
    # Applies to field==CC or CUSTOM with user type.
    values = _PlusMinusString(
        [users_by_id[uid].display_name for uid in amendment.added_user_ids
         if uid in users_by_id],
        [users_by_id[uid].display_name for uid in amendment.removed_user_ids
         if uid in users_by_id])
    return [{'value': v.strip(), 'url': None} for v in values.split()]
799