Columns — content: string (lengths 22 to 815k); id: int64 (values 0 to 4.91M)
def gains2utvec(g):
    """Converts a vector into an outer product matrix and vectorizes its upper
    triangle to obtain a vector in same format as the CHIME visibility matrix.

    Parameters
    ----------
    g : 1d array
        gain vector

    Returns
    -------
    1d array with vectorized form of upper triangle for the outer product of g
    """
    n = len(g)
    G = np.dot(g.reshape(n, 1), g.conj().reshape(1, n))
    return mat2utvec(G)
200
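A minimal usage sketch for `gains2utvec` above. `mat2utvec` is not shown in this record, so the stand-in below is an assumption (upper triangle, diagonal included, row-major order), not the CHIME implementation:

import numpy as np

def mat2utvec_sketch(M):
    # Hypothetical stand-in for mat2utvec: vectorize the upper triangle of M.
    i, j = np.triu_indices(M.shape[0])
    return M[i, j]

g = np.array([1 + 1j, 2 - 1j, 3 + 0j])
G = np.outer(g, g.conj())      # same product as g.reshape(n, 1) @ g.conj().reshape(1, n)
print(mat2utvec_sketch(G))     # length n * (n + 1) // 2 == 6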
def get_user_surveys(user: User) -> List[Dict]:
    """
    Returns a list of all surveys created by specific user with survey secret.
    """
    return list(map(Survey.get_api_brief_result_with_secrets, db.get_all_surveys(user)))
201
def velocity_clb(data):
    """
    Get current velocity from FCU

    :param data: velocity from NED
    """
    global norm_velocity
    norm_velocity = np.linalg.norm(np.array([data.twist.linear.x, data.twist.linear.y]))
202
def combine_mpgs(objs, cls=None):
    """
    Combine multiple multipart geometries into a single multipart geometry or
    geometry collection.
    """
    # Generate new list of individual geometries
    new = []
    for obj in objs:
        if isinstance(obj, shapely.geometry.base.BaseMultipartGeometry):
            new.extend(list(obj))
        elif isinstance(obj, shapely.geometry.base.BaseGeometry):
            new.extend([obj])
        else:
            raise TypeError("Invalid geometry type")
    # Convert list to geometry collection or provided class
    if cls is None:
        new = shapely.geometry.collection.GeometryCollection(new)
    else:
        new = cls(new)
    return new
203
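A hypothetical usage of `combine_mpgs` above, assuming Shapely 1.x (where multipart geometries are directly iterable, as the `list(obj)` call requires):

import shapely.geometry

a = shapely.geometry.MultiPoint([(0, 0), (1, 1)])
b = shapely.geometry.Point(2, 2)
combined = combine_mpgs([a, b])                                   # GeometryCollection of three points
as_multi = combine_mpgs([a, b], cls=shapely.geometry.MultiPoint)  # force a MultiPoint instead
print(combined.geom_type, as_multi.geom_type)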
def get_polygon_point_dist(poly, pt): """Returns the distance between a polygon and point. Parameters ---------- poly : libpysal.cg.Polygon A polygon to compute distance from. pt : libpysal.cg.Point a point to compute distance from Returns ------- dist : float The distance between ``poly`` and ``point``. Examples -------- >>> poly = Polygon([Point((0, 0)), Point((1, 0)), Point((1, 1)), Point((0, 1))]) >>> pt = Point((2, 0.5)) >>> get_polygon_point_dist(poly, pt) 1.0 >>> pt2 = Point((0.5, 0.5)) >>> get_polygon_point_dist(poly, pt2) 0.0 """ if get_polygon_point_intersect(poly, pt) is not None: dist = 0.0 else: part_prox = [] for vertices in poly._vertices: vx_range = range(-1, len(vertices) - 1) seg = lambda i: LineSegment(vertices[i], vertices[i + 1]) _min_dist = min([get_segment_point_dist(seg(i), pt)[0] for i in vx_range]) part_prox.append(_min_dist) dist = min(part_prox) return dist
204
def score_bearing(
    wanted: LocationReferencePoint,
    actual: PointOnLine,
    is_last_lrp: bool,
    bear_dist: float
) -> float:
    """Scores the difference between expected and actual bearing angle.

    A difference of 0° will result in a 1.0 score, while 180° will cause a score of 0.0."""
    bear = compute_bearing(wanted, actual, is_last_lrp, bear_dist)
    return score_angle_sector_differences(wanted.bear, bear)
205
def test_state_break_larger():
    """Stop the simulation once the value of a state is larger than a preset value"""
    sim = Sim()
    sys = VanDerPol()
    sys.add_break_greater("y", 1.0)
    sim.add_system(sys)
    sim.simulate(20, 0.01)
    # If correct the simulation should break at time 0.79
    assert sys.res.time[-1] == 0.79
206
def load_graph(graph_path):
    """ load a graph from JSON """
    with open(graph_path) as f:
        data = json.load(f)
        graph = json_graph.node_link_graph(data, directed=True)
    return graph
207
def assign_bond_states_to_dataframe(df: pd.DataFrame) -> pd.DataFrame: """ Takes a ``PandasPDB`` atom dataframe and assigns bond states to each atom based on: Atomic Structures of all the Twenty Essential Amino Acids and a Tripeptide, with Bond Lengths as Sums of Atomic Covalent Radii Heyrovska, 2008 First, maps atoms to their standard bond states (:const:`~graphein.protein.resi_atoms.DEFAULT_BOND_STATE`). Second, maps non-standard bonds states (:const:`~graphein.protein.resi_atoms.RESIDUE_ATOM_BOND_STATE`). Fills NaNs with standard bond states. :param df: Pandas PDB dataframe :type df: pd.DataFrame :return: Dataframe with added ``atom_bond_state`` column :rtype: pd.DataFrame """ # Map atoms to their standard bond states naive_bond_states = pd.Series(df["atom_name"].map(DEFAULT_BOND_STATE)) # Create series of bond states for the non-standard states ss = ( pd.DataFrame(RESIDUE_ATOM_BOND_STATE) .unstack() .rename_axis(("residue_name", "atom_name")) .rename("atom_bond_state") ) # Map non-standard states to the dataframe based on the residue and atom name df = df.join(ss, on=["residue_name", "atom_name"]) # Fill the NaNs with the standard states df = df.fillna(value={"atom_bond_state": naive_bond_states}) return df
208
def strong_components(vs, g):
    """
    Output all vertices of g reachable from any of vs enumerated by strong
    components. The components are outputted in reversed topological order.
    """
    c = collections.deque(topo_sort(vs, g))
    nest = 0
    current_comp = 0
    for label, v in dfs_iterative(GraphFilteredView(g.reversed(), set(c)), c):
        if label == "enter":
            nest += 1
            yield current_comp, v
        else:
            nest -= 1
            if nest == 0:
                current_comp += 1
209
def extract_features():
    """ extract features for cifar10 """
    model = tiny_imagenet_100_a_model()
    training_features = model.predict(X_train, verbose=1)
    validation_features = model.predict(X_val, verbose=1)
    testing_features = model.predict(X_test, verbose=1)
    with h5py.File('cifar10_features.h5', 'w') as f:
        f.create_dataset(name='training_features', data=training_features)
        f.create_dataset(name='validation_features', data=validation_features)
        f.create_dataset(name='testing_features', data=testing_features)
210
def extend_cfg(cfg):
    """
    Add new config variables.

    E.g.
        from yacs.config import CfgNode as CN
        cfg.TRAINER.MY_MODEL = CN()
        cfg.TRAINER.MY_MODEL.PARAM_A = 1.
        cfg.TRAINER.MY_MODEL.PARAM_B = 0.5
        cfg.TRAINER.MY_MODEL.PARAM_C = False
    """
    from yacs.config import CfgNode as CN
    cfg.TRAINER.OURS = CN()
    cfg.TRAINER.OURS.N_CTX = 10  # number of context vectors
    cfg.TRAINER.OURS.CSC = False  # class-specific context
    cfg.TRAINER.OURS.CTX_INIT = ""  # initialize context vectors with given words
    cfg.TRAINER.OURS.WEIGHT_U = 0.1
211
def lastero(f, B=None): """ Last erosion. y = lastero(f, B=None) `lastero` creates the image y by computing the last erosion by the structuring element B of the image f . The objects found in y are the objects of the erosion by nB that can not be reconstructed from the erosion by (n+1)B , where n is a generic non negative integer. The image y is a proper subset of the morphological skeleton by B of f . Parameters ---------- f : Binary image. B : Structuring Element (default: 3x3 elementary cross). Returns ------- y : Binary image. """ assert isbinary(f),'pymorph.lastero: can only process binary images' if B is None: B = secross() dt = dist(f,B) return regmax(dt,B)
212
def Sparse2Raster(arr, x0, y0, epsg, px, py, filename="", save_nodata_as=-9999):
    """
    Write a sparse matrix to a tiled GeoTIFF raster, tagging `save_nodata_as`
    as the band's nodata value.
    """
    BS = 256
    geotransform = (x0, px, 0.0, y0, 0.0, -(abs(py)))
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(int("%s" % (epsg)))
    projection = srs.ExportToWkt()
    if issparse(arr):
        m, n = arr.shape
        if m > 0 and n > 0:
            dtype = str(arr.dtype)
            if dtype in ["uint8"]:
                fmt = gdal.GDT_Byte
            elif dtype in ["uint16"]:
                fmt = gdal.GDT_UInt16
            elif dtype in ["uint32"]:
                fmt = gdal.GDT_UInt32
            elif dtype in ["float32"]:
                fmt = gdal.GDT_Float32
            elif dtype in ["float64"]:
                fmt = gdal.GDT_Float64
            else:
                fmt = gdal.GDT_Float64
            CO = ["BIGTIFF=YES", "TILED=YES", "BLOCKXSIZE=256", "BLOCKYSIZE=256", 'COMPRESS=LZW']
            driver = gdal.GetDriverByName("GTiff")
            dataset = driver.Create(filename, n, m, 1, fmt, CO)
            if geotransform is not None:
                dataset.SetGeoTransform(geotransform)
            if projection is not None:
                dataset.SetProjection(projection)
            band = dataset.GetRasterBand(1)
            band.SetNoDataValue(save_nodata_as)
            for i in range(0, m, BS):
                for j in range(0, n, BS):
                    BY = min(m - i, BS)
                    BX = min(n - j, BS)
                    a = arr[i:i + BY, j:j + BX].todense()
                    if save_nodata_as == 0 and (np.isnan(a)).all():
                        # do nothing
                        pass
                    else:
                        band.WriteArray(a, j, i)
            dataset = None
            return filename
    return None
213
def modularity(modules, G, L):
    """
    calculate modularity

    modules = list of nx.Graph objects
    G = graph
    L = num of links
    """
    N_m = len(modules)
    M = 0.0
    for s in range(N_m):
        l_s = 0.0
        d_s = 0
        for i in modules[s]:
            l_s += float(modules[s].degree(i))
            d_s += float(G.degree(i))
        M += (l_s / L) - (d_s / (2.0 * L)) ** 2
    return M
214
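A small sketch of calling `modularity` above with networkx, assuming the modules are node-induced subgraphs of G (that is what the per-module `degree` calls rely on):

import networkx as nx

G = nx.barbell_graph(5, 0)   # two 5-cliques joined by a single edge
modules = [G.subgraph(range(0, 5)), G.subgraph(range(5, 10))]
L = G.number_of_edges()
print(modularity(modules, G, L))   # modularity score for this two-community split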
def nonan_compstat_tstat_scan(dist, aInd, bInd, returnMaxInds = False): """ For local sieve analysis, compare A and B group for each site using a max t-statistic over a parameter space filteredDist: [ptid x sites x params] ndarray Returns tstat array [sites] aInd, bInd: Boolean row index for the two groups """ a = dist[aInd] b = dist[bInd] aN = aInd.sum() bN = bInd.sum() tstat = tstatistic(a, b, axis = 0, equal_var = False) """se = np.sqrt((aN-1)*np.var(a,axis=0)/((aN+bN) - 2) + (bN-1)*np.var(b,axis=0)/((aN+bN) - 2)) tstat = (np.mean(a,axis=0) - np.mean(b,axis=0)) / se""" """Even in the nonan cases, the tstat can be nan if there is no variation in either group (divide by zero)""" sitesNani = np.all(np.isnan(tstat), axis=1) """For sites with all nans across params, set all to 0. this makes maxi = 0""" tstat[sitesNani,:] = 0 """Zeros are better than returning nan because if this perm produces a nan result then it is not as extreme as observed (which is probably also nan)""" maxi = np.nanargmax(np.abs(tstat), axis=1) inds = np.ravel_multi_index((np.arange(maxi.shape[0]), maxi), tstat.shape) if not returnMaxInds: return tstat.flat[inds] else: return tstat.flat[inds], maxi
215
def path_to_key(path, base):
    """Return the relative path that represents the absolute path PATH under
    the absolute path BASE.  PATH must be a path under BASE.  The returned
    path has '/' separators."""
    if path == base:
        return ''
    if base.endswith(os.sep) or base.endswith('/') or base.endswith(':'):
        # Special path format on Windows:
        #  'C:/' Is a valid root which includes its separator ('C:/file')
        #  'C:' is a valid root which isn't followed by a separator ('C:file')
        #
        # In this case, we don't need a separator between the base and the path.
        pass
    else:
        # Account for a separator between the base and the relpath we're creating
        base += os.sep
    assert path.startswith(base), "'%s' is not a prefix of '%s'" % (base, path)
    return to_relpath(path[len(base):])
216
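A usage sketch for `path_to_key` above on a POSIX system; `to_relpath` is not shown in this record, so a minimal stand-in that only normalizes separators is assumed:

import os

def to_relpath(path):
    # Hypothetical stand-in: convert OS-specific separators to '/'.
    return path.replace(os.sep, '/')

print(path_to_key(os.path.join('/repo', 'trunk', 'file.txt'), '/repo'))  # trunk/file.txt
print(path_to_key('/repo', '/repo'))                                     # '' (empty string)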
def extract_digits_from_end_of_string(input_string):
    """
    Gets digits at the end of a string

    :param input_string: str
    :return: int
    """
    result = re.search(r'(\d+)$', input_string)
    if result is not None:
        return int(result.group(0))
217
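Quick usage check for `extract_digits_from_end_of_string` above, assuming it is defined in a module that imports `re`; it returns None when the string does not end in digits:

import re

print(extract_digits_from_end_of_string("build-42"))    # 42
print(extract_digits_from_end_of_string("no digits"))   # None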
def move_bdim_to_front(x, result_ndim=None):
    """
    Returns a tensor with a batch dimension at the front. If a batch
    dimension already exists, move it. Otherwise, create a new batch
    dimension at the front. If `result_ndim` is not None, ensure that the
    resulting tensor has rank equal to `result_ndim`.
    """
    x_dim = len(x.shape)
    x_bdim = x.bdim
    if x_bdim is None:
        x = torch.unsqueeze(x, 0)
    else:
        x = torch.movedim(x, x_bdim, 0)
    if result_ndim is None:
        return x
    diff = result_ndim - x_dim - (x_bdim is None)
    for _ in range(diff):
        x = torch.unsqueeze(x, 1)
    return x
218
def print_positions(jdate):
    """Function to format and print positions of the bodies for a date"""
    print('\n')
    print('------------- Bodies Positions -------------')
    for index, pos in ndenumerate(positions(jdate)):
        sign, degs, mins, secs = body_sign(pos)
        retro = ', R' if is_retrograde(jdate, *index) else ''
        print(f"{body_name(*index):10}: "
              f"{signs[sign]:15}{degs:>2}º{mins:>2}'{secs:>2}\"{retro}")
219
def paginate(**options): """ Automatically force request pagination for endpoints that shouldn't return all items in the database directly. If this decorator is used, ``limit`` and ``offset`` request arguments are automatically included in the request. The burden is then on developers to do something with those ``limit`` and ``offset`` arguments. An example request header set by this decorator is as follows: .. code-block:: text Link: <https://localhost/items?limit=50&offset=50>; rel="next", <https://localhost/items?limit=50&offset=500>; rel="last" Args: limit (int): Number of entries to limit a query by. total (int, callable): Number or callable for determining the total number of records that can be returned for the request. This is used in determining the pagination header. """ if 'total' not in options: raise AssertionError( '`@paginate` decorator requires `total=` parameter ' 'for determining total number of records to paginate. ' 'See the documentation for more details.') def decorator(func): @wraps(func) def inner(*args, **kwargs): # only paginate on get requests if request.method != 'GET': return func(*args, **kwargs) # format parameters limit = request.args.get('limit', options.get('limit')) offset = int(request.args.get('offset', options.get('offset', 0))) total = options['total']() if callable(options['total']) else options['total'] url = options.get('url', request.base_url) # config request parameters request.args = request.args.copy() request.args.setdefault('limit', limit) request.args.setdefault('offset', offset) # if no need to paginate, return without setting headers if limit is None: return func(*args, **kwargs) limit = int(limit) # add next page link headers = {} next_page = '<{}?limit={}&offset={}>'.format(url, limit, offset + limit) headers['Link'] = '{}; rel="next"'.format(next_page) # add last page link and header if options['total'] is not None: total = options['total']() if callable(options['total']) else options['total'] last_page = '<{}?limit={}&offset={}>'.format(url, limit, offset + limit) headers['Link'] += ', {}; rel="last"'.format(last_page) headers['X-Total-Count'] = str(total) # call the function and create response response = func(*args, **kwargs) # if a specific response has already been crafted, use it if isinstance(response, Response): return response # normalize response data if not isinstance(response, tuple): response = [response] response = list(response) if hasattr(response[0], 'json'): content_length = len(response[0].json) else: content_length = len(response[0]) if len(response) == 1: response.append(200) if len(response) == 2: response.append({}) # if the response data is equal to the pagination, it's # truncated and needs updated headers/status if content_length == limit: response[1] = 206 response[2].update(headers) return tuple(response) return inner return decorator
220
def fetch_basc_vascular_atlas(n_scales='scale007',
                              target_affine=np.diag((5, 5, 5))):
    """ Fetch the BASC brain atlas given its resolution.

    Parameters
    ----------
    n_scales : str, BASC dataset name, possible values are: 'scale007',
        'scale012', 'scale036', 'scale064', 'scale122', 'scale197',
        'scale325', 'scale444'

    target_affine : np.array, (default=np.diag((5, 5, 5))), affine matrix for
        the produced Nifti images

    Return
    ------
    mask_full_brain : Nifti Image, full mask brain

    atlas_rois : Nifti Image, ROIs atlas
    """
    if n_scales not in valid_scales:
        raise ValueError(f"n_scales should be in {valid_scales}, "
                         f"got '{n_scales}'")
    basc_dataset = datasets.fetch_atlas_basc_multiscale_2015(version='sym')
    atlas_rois_fname = basc_dataset[n_scales]
    atlas_to_return = image.load_img(atlas_rois_fname)
    atlas_to_return = image.resample_img(atlas_to_return, target_affine,
                                         interpolation='nearest')
    brain_mask = image_nilearn.binarize_img(atlas_to_return, threshold=0)
    return brain_mask, atlas_to_return
221
def _get_time_slices( window_start, window, projection, # Defer calling until called by test code resampling_scale, lag = 1, ): """Extracts the time slice features. Args: window_start: Start of the time window over which to extract data. window: Length of the window (in days). projection: projection to reproject all data into. resampling_scale: length scale to resample data to. lag: Number of days before the fire to extract the features. Returns: A list of the extracted EE images. """ image_collections, time_sampling = _get_all_image_collections() window_end = window_start.advance(window, 'day') drought = image_collections['drought'].filterDate( window_start.advance(-lag - time_sampling['drought'], 'day'), window_start.advance( -lag, 'day')).median().reproject(projection).resample('bicubic') vegetation = image_collections['vegetation'].filterDate( window_start.advance(-lag - time_sampling['vegetation'], 'day'), window_start.advance( -lag, 'day')).median().reproject(projection).resample('bicubic') weather = image_collections['weather'].filterDate( window_start.advance(-lag - time_sampling['weather'], 'day'), window_start.advance(-lag, 'day')).median().reproject( projection.atScale(resampling_scale)).resample('bicubic') fire = image_collections['fire'].filterDate(window_start, window_end).map( ee_utils.remove_mask).max() detection = fire.clamp(6, 7).subtract(6).rename('detection') return [drought, vegetation, weather, fire, detection]
222
def hash_parameters(keys, minimize=True, to_int=None): """ Calculates the parameters for a perfect hash. The result is returned as a HashInfo tuple which has the following fields: t The "table parameter". This is the minimum side length of the table used to create the hash. In practice, t**2 is the maximum size of the output hash. slots The original inputs mapped to a vector. This is the hash function. r The displacement vector. This is the displacement of the given row in the result vector. To find a given value, use ``x + r[y]``. offset The amount by which to offset all values (once converted to ints) to_int A function that converts the input to an int (if given). Keyword parameters: ``minimize`` Whether or not offset all integer keys internally by the minimum value. This typically results in smaller output. ``to_int`` A callable that converts the input keys to ints. If not specified, all keys should be given as ints. >>> hash_parameters([1, 5, 7], minimize=False) HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None) >>> hash_parameters([1, 5, 7]) HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None) >>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34) >>> phash = hash_parameters(l) >>> phash.slots (18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15) For some values, the displacement vector will be rather empty: >>> hash_parameters('Andrea', to_int=ord).r (1, None, None, None, 0, -3, 4, None) """ # If to_int is not assigned, simply use the identity function. if to_int is None: to_int = __identity key_to_original = {to_int(original): original for original in keys} # Create a set of all items to be hashed. items = list(key_to_original.keys()) if minimize: offset = 0 - min(items) items = frozenset(x + offset for x in items) else: offset = 0 # 1. Start with a square array (not stored) that is t units on each side. # Choose a t such that t * t >= max(S) t = choose_best_t(items) assert t * t > max(items) and t * t >= len(items) # 2. Place each key K in the square at location (x,y), where # x = K mod t, y = K / t. row_queue = place_items_in_square(items, t) # 3. Arrange rows so that they'll fit into one row and generate a # displacement vector. final_row, displacement_vector = arrange_rows(row_queue, t) # Translate the internal keys to their original items. slots = tuple(key_to_original[item - offset] if item is not None else None for item in final_row) # Return the parameters return HashInfo( t=t, slots=slots, r=displacement_vector, offset=offset, to_int=to_int if to_int is not __identity else None )
223
def Base64EncodeHash(digest_value):
    """Returns the base64-encoded version of the input hex digest value."""
    return base64.encodestring(binascii.unhexlify(digest_value)).rstrip('\n')
224
def to_cropped_imgs(ids, dir, suffix, scale):
    """Yield correctly cropped images from a list of (id, position) tuples."""
    for id, pos in ids:
        # Resize the image to `scale` times its original size
        im = resize_and_crop(Image.open(dir + id + suffix), scale=scale)
        yield get_square(im, pos)
225
def init_node(node_name, publish_topic):
    """
    Init the node.

    Parameters
    ----------
    node_name
        Name assigned to the node
    publish_topic
        Name of the publisher topic
    """
    rospy.init_node(node_name, anonymous=True)
    publisher = rospy.Publisher(publish_topic, Int16MultiArray, queue_size=10)
    return publisher
226
def AdvectionRK4(particle, fieldset, time):
    """Advection of particles using fourth-order Runge-Kutta integration.

    Function needs to be converted to Kernel object before execution"""
    (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]
    lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)
    (u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1]
    lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)
    (u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2]
    lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)
    (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3]
    particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt
    particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt
227
def fits_checkkeyword(fitsfile, keyword, ext=0, silent=False): """ Check the keyword value of a FITS extension. Parameters ---------- fitsfile : str Path to the FITS file. keyword : str The keyword to check. ext : int or str Extension index (int) or key (str). Returns ------- Header key value If both the specified extension and keyword exist. ``None`` If a ``KeyError`` exception would have been raised and ``silent=True`` is set. Raises ------ KeyError If either the specified extension or the keyword cannot be found, and ``silent=False``, a KeyError exception will be raised. OSError If the specified file cannot be found, astropy.io.fits will raise OSError. """ import astropy.io.fits as pf fh = pf.open(fitsfile) try: return fh[ext].header[keyword] except KeyError as e: if silent: return None else: print('The specified extension or keyword is not found.') raise e
228
def check_nodes(redis_server, host_list, hpgdomain):
    """Count number of nodes which are accessible via the Hashpipe-Redis gateway.
    Use this to show which nodes are inaccessible.
    """
    print("Counting accessible hosts:")
    n_accessible = 0
    for host in host_list:
        host_key = "{}://{}/0/status".format(hpgdomain, host)
        host_status = redis_server.hgetall(host_key)
        if len(host_status) > 0:
            n_accessible += 1
        else:
            print("    {} is inaccessible".format(host))
            print(host_status)
    print("Accessible hosts: {}".format(n_accessible))
229
def print_size_np_array(array, array_name):
    """Print shape and total Mb consumed by the elements of the array."""
    logger.debug("Shape of {0} array: {1}".format(array_name, array.shape))
    logger.debug("Size of {0}: {1:.3f} MB".format(array_name,
                                                  array.nbytes / float(2 ** 20)))
230
def arr_ds(time=True, var='tmp'):
    """
    Read in a saved dataset containing lat, lon, and values

    :param time: (boolean) - whether to return dataset with time
    :param var: (str) - variable type (only tmp/rh currently)
    :return ds: (xr.dataset) - dataset
    """
    if time:
        if var == 'tmp':
            path = pre.join_cwd('data/air.sig995.1948.nc')
        if var == 'rh':
            path = pre.join_cwd('data/rhum.sig995.1948.nc')
    else:
        path = pre.join_cwd('data/slp.nc')
    return xr.open_dataset(path)
231
def reset_all():
    """ Reset the stored values """
    print('Resetting stored values')
    st.session_state.results = []
    st.session_state.carrier_accumulation = None
    st.session_state.period = ''
    st.session_state.ran = False
232
def test_invalid_file_type(): """ To test the script behaviour if we provide an invalid file type """ expected_error_msg = 'Invalid File Type. Allowed File Type is .apk' script_error = None # Input Values app_path = get_file_location(product_test_config.INVALID_FILE) build_info = 'Image Job,9' deployment_type = 'Beta' deactivate_old_product = 'true' try: subprocess.check_output( ['python', os.path.join(FILE_DIRECTORY, 'deployment.py'), app_path, build_info, deployment_type, deactivate_old_product], shell=True) except subprocess.CalledProcessError as e: script_error = e.output # Assert that the script displays proper error message assert expected_error_msg in str(script_error), 'Script did not display error message for Invalid file type!' log.info('Proper error message was displayed for invalid file type.')
233
def clone(): """clone(n, args, inpanel) -> Node Create a clone node that behaves identical to the original. The node argument is the node to be cloned, args and inpanel are optional arguments similar to createNode. A cloned node shares the exact same properties with its original. Clones share the same set of knobs and the same control panel. However they can have different positions and connections in the render tree. Any clone, including the original, can be deleted at any time without harming any of its clones. @param n: Node. @param args: Optional number of inputs requested. @param inpanel: Optional boolean. @return: Node""" pass
234
def quarter(d):
    """
    Return start/stop datetime for the quarter as defined by dt.
    """
    from django_toolkit.datetime_util import quarter as datetime_quarter
    first_date, last_date = datetime_quarter(datetime(d.year, d.month, d.day))
    return first_date.date(), last_date.date()
235
def _fp(yhat, ytrue):
    """
    Class wise false positive count.

    :param yhat:
    :param ytrue:
    :return:
    """
    yhat_true = np.asarray(yhat == np.max(yhat, axis=1, keepdims=True), dtype="float32")
    return np.sum(yhat_true * (1. - ytrue), axis=0)
236
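A tiny sanity check for `_fp` above: predictions are one-hot-ified row-wise via the argmax trick, and the first sample predicts class 1 while the truth is class 0, giving one false positive for class 1:

import numpy as np

yhat = np.array([[0.2, 0.8],
                 [0.9, 0.1]])
ytrue = np.array([[1.0, 0.0],
                  [1.0, 0.0]])
print(_fp(yhat, ytrue))   # [0. 1.]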
def rawmap(k2i, file): """ Map index to raw data from file Arguments k2i: key-to-index map file: file containing raw data map Returns raw: index-to-raw map if file exists else identity map """ raw = {0: ''} if os.path.isfile(file): with open(file, "r") as f: for line in f.readlines(): line = line.split("\t") k, rw = line[0].strip(), line[1].strip() raw[k2i[k]] = rw else: for k in k2i: raw[k2i[k]] = k2i[k] return raw
237
def explainPlan(cursor, query, iid):
    """
    Determine the execution plan Oracle follows to execute a specified SQL statement.
    """
    cursor.execute("EXPLAIN PLAN SET STATEMENT_ID = '{0}' FOR ".format(iid) + query)
238
def out_flag():
    """Either -o or --outfile"""
    return '-o' if random.randint(0, 1) else '--outfile'
239
def guess_from_peak(y, x, negative=False):
    """Estimate starting values from 1D peak data and return (height, center, sigma).

    Parameters
    ----------
    y : array-like
        y data
    x : array-like
        x data
    negative : bool, optional
        determines if peak height is positive or negative, by default False

    Returns
    -------
    (height, center, sigma) : (float, float, float)
        Estimates of 1 gaussian line parameters.
    """
    sort_increasing = np.argsort(x)
    x = x[sort_increasing]
    y = y[sort_increasing]

    # find the max/min values of x and y, and the x value at max(y)
    maxy, miny = max(y), min(y)
    maxx, minx = max(x), min(x)
    height = maxy - miny

    # set a backup sigma, and center in case using the halfmax calculation doesn't work.
    # The backup sigma = 1/6 the full x range and the backup center is the
    # location of the maximum
    sig = (maxx - minx) / 6.0
    cen = x[np.argmax(y)]

    # the explicit conversion to a NumPy array is to make sure that the
    # indexing on line 65 also works if the data is supplied as pandas.Series

    # find the x positions where y is above (ymax+ymin)/2
    x_halfmax = np.array(x[y > (maxy + miny) / 2.0])
    if negative:
        height = -(maxy - miny)
        # backup center for if negative.
        cen = x[np.argmin(y)]
        x_halfmax = x[y < (maxy + miny) / 2.0]

    # calculate sigma and center based on where y is above half-max:
    if len(x_halfmax) > 2:
        sig = (x_halfmax[-1] - x_halfmax[0]) / 2.0
        cen = x_halfmax.mean()

    return height, cen, sig
240
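A quick synthetic check of `guess_from_peak` above on a clean Gaussian (only NumPy is needed); note the returned width is a half-max-based estimate, not the true sigma:

import numpy as np

x = np.linspace(-5, 5, 201)
y = 3.0 * np.exp(-0.5 * ((x - 1.0) / 0.7) ** 2)
height, center, sigma = guess_from_peak(y, x)
print(height, center, sigma)   # roughly 3.0, 1.0 and ~0.8 for a true sigma of 0.7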
def setup(): """ Configure SSL with letsencrypt's certbot for the domain """ server_name = ctx("nginx.server_name") path_letsencrypt = '/etc/letsencrypt/live' path_dhparams = '/etc/letsencrypt/ssl-dhparams.pem' path_key = '{}/{}/privkey.pem'.format(path_letsencrypt, server_name) path_cert = '{}/{}/fullchain.pem'.format(path_letsencrypt, server_name) if not fabtools.deb.is_installed('certbot'): execute(install) if not files.exists('/etc/letsencrypt/ssl-dhparams.pem', use_sudo=True): sudo('openssl dhparam -out /etc/letsencrypt/ssl-dhparams.pem 2048') if not files.exists('/etc/letsencrypt/options-ssl-nginx.conf', use_sudo=True): upload_template('options-ssl-nginx.conf.template', '/etc/letsencrypt/options-ssl-nginx.conf') if not files.exists(path_cert, use_sudo=True): upload_template('nginx_letsencrypt_init.template', ctx('nginx.config_path')) sudo('certbot --authenticator webroot --installer nginx -d {}'.format( server_name)) upload_template('nginx_letsencrypt.template', ctx('nginx.config_path'), context={ 'ssl': { 'letsencrypt': True, 'dhparams': path_dhparams, 'key': path_key, 'cert': path_cert, } })
241
def get_default_extension():
    """ return the default view extension """
    return rawData.Visualization
242
def reset():
    """Resets the logging system, removing all log handlers."""
    global _log_handlers, _log_handlers_limited, _initialized
    _log_handlers = []
    _log_handlers_limited = []
    _initialized = False
243
def trace(service_addr, logdir, duration_ms, worker_list='', num_tracing_attempts=3): """Sends grpc requests to profiler server to perform on-demand profiling. This method will block caller thread until receives tracing result. Args: service_addr: Address of profiler service e.g. localhost:6009. logdir: Path of TensorBoard log directory e.g. /tmp/tb_log. duration_ms: Duration of tracing or monitoring in ms. worker_list: Optional. The list of workers that we are about to profile in the current session (TPU only). num_tracing_attempts: Optional. Automatically retry N times when no trace event is collected (default 3). Raises: UnavailableError: If no trace event is collected. """ if not pywrap_tfe.TFE_ProfilerClientStartTracing( service_addr, logdir, worker_list, True, duration_ms, num_tracing_attempts): raise errors.UnavailableError(None, None, 'No trace event is collected.')
244
def getNewPluginManager() -> pluginManager.ArmiPluginManager:
    """
    Return a new plugin manager with all of the hookspecs pre-registered.
    """
    pm = pluginManager.ArmiPluginManager("armi")
    pm.add_hookspecs(ArmiPlugin)
    return pm
245
def get_utterances_from_stm(stm_file): """ Return list of entries containing phrase and its start/end timings :param stm_file: :return: """ res = [] with io.open(stm_file, "r", encoding='utf-8') as f: for stm_line in f: if re.match ("^;;",stm_line) is None : tokens = stm_line.split() start_time = float(tokens[3]) end_time = float(tokens[4]) filename = tokens[0] if tokens[2] != "inter_segment_gap": transcript = " ".join(t for t in tokens[6:]).strip().encode("utf-8", "ignore").decode("utf-8", "ignore") if transcript != "ignore_time_segment_in_scoring" and transcript.strip() !="": # if the transcription not empty and not equal to ignore_time_segment_in_scoring res.append({"start_time": start_time, "end_time": end_time, "filename": filename, "transcript": transcript }) return res
246
def _iter_model_rows(model, column, include_root=False):
    """Iterate over all row indices in a model"""
    indices = [QtCore.QModelIndex()]  # start iteration at root
    for index in indices:
        # Add children to the iterations
        child_rows = model.rowCount(index)
        for child_row in range(child_rows):
            child_index = model.index(child_row, column, index)
            indices.append(child_index)
        if not include_root and not index.isValid():
            continue
        yield index
247
def lookup_quo_marks(lang='en-US', map_files=MAP_FILES, encoding='utf-8'): """Looks up quotation marks for a language. Arguments: ``lang`` (``str``): An RFC 5646-ish language code (e.g., "en-US", "pt-BR", "de", "es"). Defines the language the quotation marks of which to look up. Default: 'en-US'. ``maps`` (sequence of ``str`` instances): A List of possible locations of mappsings of RFC 5646-like language codes to lists of quotation marks. Default: ``MAP_FILES`` (module constant). ``encoding`` (``str``): The encoding of those files. Defaults to 'utf-8'. If ``lang`` contains a country code, but no quotation marks have been defined for that country, the country code is discarded and the quotation marks for the language simpliciter are looked up. For example, 'de-DE' will find 'de'. If ``lang`` does not contain a country code or if that code has been discarded and no quotation marks have been defined for that language simpliciter, but quotation marks have been defined for variants of that language as they are spoken in a particular country, the quotation marks of the variant that has been defined first are used. For example, 'en' will find 'en-US'. Returns (``QuoMarks``): The quotation marks of that language. Raises: ``QuoMarkUnknownLanguageError``: If no quotation marks have been defined for ``lang``. All exceptions ``load_quotation_maps`` and ``QuoMarks.__init__`` raise. """ map_ = load_maps(map_files, encoding=encoding) for i in range(3): try: return QuoMarks(*map_[lang]) except KeyError: if i == 0: lang = lang.split('-')[0] elif i == 1: for j in map_: if not isinstance(j, basestring): # pylint: disable=E0602 continue if j.startswith(lang): lang = j break else: break raise QuoMarkUnknownLangError(lang=lang)
248
def extractVars(*names,**kw): """Extract a set of variables by name from another frame. :Parameters: - `*names`: strings One or more variable names which will be extracted from the caller's frame. :Keywords: - `depth`: integer (0) How many frames in the stack to walk when looking for your variables. Examples: In [2]: def func(x): ...: y = 1 ...: print extractVars('x','y') ...: In [3]: func('hello') {'y': 1, 'x': 'hello'} """ depth = kw.get('depth',0) callerNS = sys._getframe(depth+1).f_locals return dict((k,callerNS[k]) for k in names)
249
def chmod_rights(file, access_rights):
    """Changes access permission of a file:

    Read additional information in:
    https://docs.python.org/3/library/os.html#os.chmod
    https://www.geeksforgeeks.org/python-os-chmod-method/
    """
    access = os.chmod(file, access_rights, follow_symlinks=True)
250
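Hypothetical usage of `chmod_rights` above with the standard `stat` constants (assumes a file named example.txt already exists); owner gets read/write, everyone else read-only:

import os
import stat

chmod_rights("example.txt",
             stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
print(oct(os.stat("example.txt").st_mode & 0o777))   # 0o644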
def assert_array_almost_equal(
    x: Tuple[int, int, int, int, int, int],
    y: Tuple[int, int, int, int, int, int]
):
    """
    usage.skimage: 1
    """
    ...
251
def replace_unwanted_xml_attrs(body):
    """
    Method to return transformed string after removing all the unwanted characters from given xml body

    :param body:
    :return:
    """
    return body.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
252
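Quick check of `replace_unwanted_xml_attrs` above; escaping `&` first is what prevents double-escaping the other entities:

print(replace_unwanted_xml_attrs('<tag attr="a & b">'))
# &lt;tag attr="a &amp; b"&gt;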
def protocol_version_to_kmip_version(value): """ Convert a ProtocolVersion struct to its KMIPVersion enumeration equivalent. Args: value (ProtocolVersion): A ProtocolVersion struct to be converted into a KMIPVersion enumeration. Returns: KMIPVersion: The enumeration equivalent of the struct. If the struct cannot be converted to a valid enumeration, None is returned. """ if not isinstance(value, ProtocolVersion): return None if value.major == 1: if value.minor == 0: return enums.KMIPVersion.KMIP_1_0 elif value.minor == 1: return enums.KMIPVersion.KMIP_1_1 elif value.minor == 2: return enums.KMIPVersion.KMIP_1_2 elif value.minor == 3: return enums.KMIPVersion.KMIP_1_3 elif value.minor == 4: return enums.KMIPVersion.KMIP_1_4 else: return None elif value.major == 2: if value.minor == 0: return enums.KMIPVersion.KMIP_2_0 else: return None else: return None
253
def compressStack( imageStack, blosc_threads = 1, pool_threads=maxThreads ): """ Does frame compression using a ThreadPool to distribute the load. """ blosc.set_nthreads( blosc_threads ) tPool = ThreadPool( pool_threads ) num_slices = imageStack.shape[0] # Build parameters list for the threaded processeses, consisting of index tArgs = [None] * num_slices itemSize = imageStack.dtype.itemsize bytesList = [None] * num_slices for J in np.arange(num_slices): tArgs[J] = (imageStack[J,:,:].__array_interface__['data'][0], \ N*N, itemSize, bytesList, J) # All operations are done 'in-place' tPool.map( compressSlice, tArgs ) tPool.close() tPool.join()
254
def packing_with_uncommitted_data_non_undoing(): """ This covers regression for bug #130459. When uncommitted data exists it formerly was written to the root of the blob_directory and confused our packing strategy. We now use a separate temporary directory that is ignored while packing. >>> import transaction >>> from ZODB.DB import DB >>> from ZODB.serialize import referencesf >>> blob_storage = create_storage() >>> database = DB(blob_storage) >>> connection = database.open() >>> root = connection.root() >>> from ZODB.blob import Blob >>> root['blob'] = Blob() >>> connection.add(root['blob']) >>> with root['blob'].open('w') as f: _ = f.write(b'test') >>> blob_storage.pack(new_time(), referencesf) Clean up: >>> transaction.abort() >>> connection.close() >>> blob_storage.close() >>> database.close() """
255
def UniversalInput(i, o, *args, **kwargs): """ Returns the most appropriate input UI element, based on available keys of input devices present. For now, always returns UI elements configured for character input. TODO: document arguments (most of them are passed through, like "name" or "message") """ charmap = kwargs.pop("charmap", "full") # Determining which input is necessary, according to the charmap requested numpadinputs = {"full":NumpadCharInput, "number":NumpadNumberInput, "hex":NumpadHexInput, "password":NumpadPasswordInput} numpadinput_cls = numpadinputs[charmap] # What goes here for NumpadKeyboardInput arrowkeyinput_maps = {"full":['][S', '][c', '][C', '][s', '][n'], "number":['][n'], "hex":['][h']} arrowkeyinput_maps["password"] = arrowkeyinput_maps["full"] arrowkeyinput_map = arrowkeyinput_maps[charmap] # First, checking if any of the drivers with None as available_keys is present if None in i.available_keys.values(): # HID driver (or other driver with "any key is possible" is likely used # Let's use the most fully-functional input available at the moment return numpadinput_cls(i, o, *args, **kwargs) all_available_keys = sum(i.available_keys.values(), []) ascii_keys = ["KEY_{}".format(c.upper()) for c in list("abcdefghijklmnopqrstuvwxyz123456789") + ["SPACE"]] ascii_keys_available = all([ascii_key in all_available_keys for ascii_key in ascii_keys]) action_keys = ["KEY_F1", "KEY_F2"] action_keys_available = all([action_key in all_available_keys for action_key in action_keys]) if ascii_keys_available and action_keys_available: # All required ASCII and action keys are supported return NumpadKeyboardInput(i, o, *args, **kwargs) number_keys = ["KEY_{}".format(x) for x in range(10)] number_keys.append("KEY_*") number_keys.append("KEY_#") number_keys_available = all([number_key in all_available_keys for number_key in number_keys ]) if number_keys_available and action_keys_available: # All number and action keys are supported return numpadinput_cls(i, o, *args, **kwargs) # Fallback - only needs five primary keys return CharArrowKeysInput(i, o, allowed_chars=arrowkeyinput_map, *args, **kwargs)
256
def valid_template(template):
    """Is this a template that returns a valid URL?"""
    if template.name.lower() == "google books" and (
        template.has("plainurl") or template.has("plain-url")
    ):
        return True
    if template.name.lower() == "billboardurlbyname":
        return True
    return False
257
def wait_process(pid, *, exitcode, timeout=None): """ Wait until process pid completes and check that the process exit code is exitcode. Raise an AssertionError if the process exit code is not equal to exitcode. If the process runs longer than timeout seconds (SHORT_TIMEOUT by default), kill the process (if signal.SIGKILL is available) and raise an AssertionError. The timeout feature is not available on Windows. """ if os.name != "nt": import signal if timeout is None: timeout = SHORT_TIMEOUT t0 = time.monotonic() sleep = 0.001 max_sleep = 0.1 while True: pid2, status = os.waitpid(pid, os.WNOHANG) if pid2 != 0: break # process is still running dt = time.monotonic() - t0 if dt > SHORT_TIMEOUT: try: os.kill(pid, signal.SIGKILL) os.waitpid(pid, 0) except OSError: # Ignore errors like ChildProcessError or PermissionError pass raise AssertionError(f"process {pid} is still running " f"after {dt:.1f} seconds") sleep = min(sleep * 2, max_sleep) time.sleep(sleep) else: # Windows implementation pid2, status = os.waitpid(pid, 0) exitcode2 = os.waitstatus_to_exitcode(status) if exitcode2 != exitcode: raise AssertionError(f"process {pid} exited with code {exitcode2}, " f"but exit code {exitcode} is expected") # sanity check: it should not fail in practice if pid2 != pid: raise AssertionError(f"pid {pid2} != pid {pid}")
258
def test_calculate_distance_factor():
    """ Tests calculate_distance_factor function """
    assert utils.calculate_distance_factor(441.367, 100) == 0.11
259
def build_get_complex_item_null_request( **kwargs # type: Any ): # type: (...) -> HttpRequest """Get array of complex type with null item [{'integer': 1 'string': '2'}, null, {'integer': 5, 'string': '6'}]. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow. :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow. :rtype: ~azure.core.rest.HttpRequest Example: .. code-block:: python # response body for status code(s): 200 response.json() == [ { "integer": 0, # Optional. "string": "str" # Optional. } ] """ accept = "application/json" # Construct URL url = '/array/complex/itemnull' # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, headers=header_parameters, **kwargs )
260
def maybe_load_pyjulia():
    """
    Execute ``julia.Julia(init_julia=False)`` if appropriate.

    It is useful since it skips initialization when creating the
    global "cached" API.  This makes PyJulia initialization slightly
    faster and also makes sure to not load incompatible `libjulia`
    when the name of the julia command of this process is not `julia`.
    """
    if (os.environ.get("IPYTHON_JL_SETUP_PYJULIA", "yes").lower()
            in ("yes", "t", "true")):
        try:
            from julia import Julia
        except ImportError:
            pass
        else:
            with init_julia_message_on_failure():
                Julia(init_julia=False)
261
def get_choice():
    """ Gets and returns choice for mode to use when running minimax """
    choice = input(
        "Please enter a number (1 - 4)\n"
        " 1. Both players use minimax correctly at every turn\n"
        " 2. The starting player (X) is an expert and the opponent (0) only has a 50% chance to use minimax\n\t at each turn\n"
        " 3. The starting player (X) only has a 50% chance to use minimax at each turn and the opponent (0)\n\t is an expert.\n"
        " 4. Both players only have a 50% chance to use minimax at each turn.\n"
    )
    while (choice != '1' and choice != '2' and choice != '3' and choice != '4'):
        choice = input("Not a choice. Go agane: (1 - 4)\n")
    return choice
262
def _arange_ndarray(arr, shape, axis, reverse=False):
    """
    Create an ndarray of `shape` with increments along specified `axis`

    Parameters
    ----------
    arr : ndarray
        Input array of arbitrary shape.
    shape : tuple of ints
        Shape of desired array. Should be equivalent to `arr.shape` except
        `shape[axis]` which may have any positive value.
    axis : int
        Axis to increment along.
    reverse : bool
        If False, increment in a positive fashion from 1 to `shape[axis]`,
        inclusive. If True, the bounds are the same but the order reversed.

    Returns
    -------
    padarr : ndarray
        Output array sized to pad `arr` along `axis`, with linear range from
        1 to `shape[axis]` along specified `axis`.

    Notes
    -----
    The range is deliberately 1-indexed for this specific use case. Think of
    this algorithm as broadcasting `np.arange` to a single `axis` of an
    arbitrarily shaped ndarray.
    """
    initshape = tuple(1 if i != axis else shape[axis]
                      for (i, x) in enumerate(arr.shape))
    if not reverse:
        padarr = np.arange(1, shape[axis] + 1)
    else:
        padarr = np.arange(shape[axis], 0, -1)
    padarr = padarr.reshape(initshape)
    for i, dim in enumerate(shape):
        if padarr.shape[i] != dim:
            padarr = padarr.repeat(dim, axis=i)
    return padarr
263
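A small demonstration of `_arange_ndarray` above: broadcast the 1-indexed range along axis 1 of a (2, 4) target shape:

import numpy as np

arr = np.zeros((2, 3))
print(_arange_ndarray(arr, shape=(2, 4), axis=1))
# [[1 2 3 4]
#  [1 2 3 4]]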
def rotate_pt(x_arr, y_arr, theta_deg, xoff=0, yoff=0):
    """
    Rotate an array of points (x_arr, y_arr) by theta_deg offsetted
    from a center point by (xoff, yoff).
    """
    # TODO: use opencv acceleration if available
    a_arr = x_arr - xoff
    b_arr = y_arr - yoff
    cos_t = np.cos(np.radians(theta_deg))
    sin_t = np.sin(np.radians(theta_deg))
    ap = (a_arr * cos_t) - (b_arr * sin_t)
    bp = (a_arr * sin_t) + (b_arr * cos_t)
    return np.asarray((ap + xoff, bp + yoff))
264
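Rotating the point (1, 0) by 90 degrees about the origin with `rotate_pt` above:

import numpy as np

x, y = rotate_pt(np.array([1.0]), np.array([0.0]), 90.0)
print(np.round(x, 6), np.round(y, 6))   # [0.] [1.]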
def get_frames(data: Union[sc.DataArray, sc.Dataset], **kwargs) -> sc.Dataset:
    """
    For a supplied instrument chopper cascade and detector positions, find
    the locations in microseconds of the WFM frames.
    TODO: Currently, only the analytical (time-distance) method has been tested
    and is enabled. The peak-finding method is temporarily disabled.
    """
    # if data is not None:
    #     return frames_peakfinding(data=data,
    #                               instrument=instrument,
    #                               plot=plot,
    #                               **kwargs)
    # else:
    return frames_analytical(data=data, **kwargs)
265
def _map_nonlinearities( element: Any, nonlinearity_mapping: Type[NonlinearityMapping] = NonlinearityMapping ) -> Any: """Checks whether a string input specifies a PyTorch layer. The method checks if the input is a string. If the input is a string, it is preprocessed and then mapped to a corresponding PyTorch activation layer. If the input is not a string it is returned unchanged. Parameters ---------- element : Any Arbitrary input to this function. Returns ------- Any Returns either a callable activation or normalization layer or the input element. """ nonlinearities = nonlinearity_mapping() return _map_call_dict(nonlinearities, element)
266
def confirm(text, app, version, services=None, default_yes=False): """Asks a user to confirm the action related to GAE app. Args: text: actual text of the prompt. app: instance of Application. version: version or a list of versions to operate upon. services: list of services to operate upon (or None for all). Returns: True on approval, False otherwise. """ print(text) print(' Directory: %s' % os.path.basename(app.app_dir)) print(' App ID: %s' % app.app_id) print(' Version: %s' % version) print(' Services: %s' % ', '.join(services or app.services)) if default_yes: return raw_input('Continue? [Y/n] ') not in ('n', 'N') else: return raw_input('Continue? [y/N] ') in ('y', 'Y')
267
def _get_streamflow(product, feature_id, s_date, s_time, e_date, lag): """Downloads streamflow time series for a given river. Downloads streamflow time series for a given river feature using the HydroShare archive and Web service. Units are in cubic feet per second as returned by HydroShare. For the API description, see https://apps.hydroshare.org/apps/nwm-data-explorer/api/ Args: product: String indicating model product. Valid values are: analysis_assim, short_range, medium_range, long_range feature_id: String identifier of the river feature. s_date: (String or Date) Valid date for the model simulation. s_time: (String) Two digit simulation hour, e.g., '06'. e_date: (String or Date) End date of data to retrieve. Valid for analysis_assim only. lag: (String) Lag argument for URI. This is an escaped comma delimited list of long_range forecast simulation hours, e.g., 00z%2C06z%2C12z%2C18z. Returns: A list of dicts representing time series. Each series includes name, datetimes, and values. For example: {'name': 'Member 1 t00z', 'dates': ['2016-06-02 01:00:00+00:00', '2016-06-02 02:...'] 'values': [257.2516, 1295.7293]} Raises: HTTPError: An error occurred accessing data from HydroShare. ValueError: Service request returned no data, likely due to invalid input arguments. """ if 'long_range' in product: product = 'long_range' s_date = date_parser.parse(str(s_date)).strftime('%Y-%m-%d') if e_date: e_date = date_parser.parse(str(e_date)).strftime('%Y-%m-%d') uri_template = ( HS_API_URI + 'get-netcdf-data?config={0}&geom=channel_rt&' 'variable=streamflow&COMID={1}&' 'startDate={2}&time={3}&endDate={4}&lag={5}') uri = uri_template.format(product, feature_id, s_date, s_time, e_date, lag) response = urlopen(uri) json_data = _get_netcdf_data_response_to_json(uri, response) series_list = _unpack_series(json_data, product) return series_list
268
def main(): """Make a jazz noise here""" args = get_args() input_sudoku = [[0, 0, 0, 0, 5, 2, 0, 4, 0], [0, 0, 3, 6, 7, 0, 0, 0, 9], [0, 6, 0, 0, 0, 3, 5, 0, 0], [0, 9, 0, 0, 2, 6, 8, 0, 0], [0, 0, 0, 0, 0, 7, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 2, 0], [1, 3, 0, 0, 0, 8, 0, 0, 0], [0, 0, 4, 1, 0, 5, 0, 0, 0], [0, 0, 0, 0, 0, 4, 0, 0, 0]] # 0 means the cells where no value is assigned print('Input Sudoku:') print_table(input_sudoku) print('Input sudoku: ', input_sudoku, file=args.outfile) print('\n--------------------------------------\n') if Sudoku(input_sudoku, 0, 0): print('Here is the solver:') print_table(input_sudoku) print('Here is the solver: ', input_sudoku, file=args.outfile) else: print("Something is wrong. Please check your sudoku again!!!")
269
def debug(func):
    """Debug the decorated function"""
    @functools.wraps(func)
    def wrapper_debug(*args, **kwargs):
        args_repr = [repr(a) for a in args]
        kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
        signature = ", ".join(args_repr + kwargs_repr)
        print(f"Calling {func.__name__}({signature})")
        value = func(*args, **kwargs)
        print(f"{func.__name__!r} returned {value!r}")
        return value
    return wrapper_debug
270
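Example of applying the `debug` decorator above to a small function:

import functools   # required by the decorator

@debug
def add(a, b):
    return a + b

add(2, b=3)
# Calling add(2, b=3)
# 'add' returned 5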
def get_profanity(text: str, duplicates=False) -> list:
    """Gets all profane words and returns them in a list"""
    text: str = text.lower()
    additional: list = []
    profane: list = [word for word in PROFANE_WORD_LIST if word in text]
    if duplicates:
        for word in profane:
            c: int = text.count(word)
            if c > 1:
                x: list = [word for _ in range(c - 1)]
                additional.extend(list(x))
        profane.extend(additional)
    return profane
271
def build_stats(train_result, eval_result, time_callback): """Normalizes and returns dictionary of stats. Args: train_result: The final loss at training time. eval_result: Output of the eval step. Assumes first value is eval_loss and second value is accuracy_top_1. time_callback: Time tracking callback instance. Returns: Dictionary of normalized results. """ stats = {} if eval_result: stats['eval_loss'] = eval_result[0] stats['eval_acc'] = eval_result[1] stats['train_loss'] = train_result[0] stats['train_acc'] = train_result[1] if time_callback: timestamp_log = time_callback.timestamp_log stats['step_timestamp_log'] = timestamp_log stats['train_finish_time'] = time_callback.train_finish_time if len(timestamp_log) > 1: stats['avg_exp_per_second'] = ( time_callback.batch_size * time_callback.log_steps * (len(time_callback.timestamp_log) - 1) / (timestamp_log[-1].timestamp - timestamp_log[0].timestamp)) return stats
272
def multiplicative(v1, v2, alpha=1, beta=1):
    """
    Weighted elementwise multiplication.
    """
    compword = str(v1.row2word[0]) + " " + str(v2.row2word[0])
    comp = (alpha * v1) * (beta * v2)
    comp.row2word = [compword]
    return comp
273
def supported_formats(): """Generates dictionary entries for supported formats. Each entry will always have description, extension, mimetype, and category. Reader will provide the reader, if one exists, writer will provide the writer, if one exists. Metadata gives a list of metadata read and/or written by that type. Options provides accepted options by the format and their accepted values. """ yield ({ "description": "Brother Embroidery Format", "extension": "pec", "mimetype": "application/x-pec", "category": "embroidery", "reader": PecReader, "writer": PecWriter, "status": "stable", "metadata": ("name") }) yield ({ "description": "Brother Embroidery Format", "extension": "pes", "mimetype": "application/x-pes", "category": "embroidery", "reader": PesReader, "writer": PesWriter, "status": "stable", "options": { "pes version": (1, 6), "truncated": (True, False) }, "metadata": ("name", "author", "category", "keywords", "comments") }) yield ({ "description": "Melco Embroidery Format", "extension": "exp", "mimetype": "application/x-exp", "category": "embroidery", "status": "stable", "reader": ExpReader, "writer": ExpWriter, }) yield ({ "description": "Tajima Embroidery Format", "extension": "dst", "mimetype": "application/x-dst", "category": "embroidery", "reader": DstReader, "writer": DstWriter, "status": "stable", "options": { "extended headers": (True, False) }, "metadata": ("name") }) yield ({ "description": "Janome Embroidery Format", "extension": "jef", "mimetype": "application/x-jef", "category": "embroidery", "status": "stable", "reader": JefReader, "writer": JefWriter, }) yield ({ "description": "Pfaff Embroidery Format", "extension": "vp3", "mimetype": "application/x-vp3", "category": "embroidery", "status": "stable", "reader": Vp3Reader, "writer": Vp3Writer, }) yield ({ "description": "Scalable Vector Graphics", "extension": "svg", "mimetype": "image/svg+xml", "status": "stable", "category": "vector", "writer": SvgWriter, }) yield ({ "description": "Comma-separated values", "extension": "csv", "mimetype": "text/csv", "category": "debug", "reader": CsvReader, "writer": CsvWriter, "status": "stable", "options": { "deltas": (True, False) }, }) yield ({ "description": "Singer Embroidery Format", "extension": "xxx", "mimetype": "application/x-xxx", "category": "embroidery", "status": "stable", "reader": XxxReader }) yield ({ "description": "Janome Embroidery Format", "extension": "sew", "mimetype": "application/x-sew", "category": "embroidery", "status": "stable", "reader": SewReader }) yield ({ "description": "Barudan Embroidery Format", "extension": "u01", "mimetype": "application/x-u01", "category": "embroidery", "status": "stable", "reader": U01Reader, "writer": U01Writer }) yield ({ "description": "Husqvarna Viking Embroidery Format", "extension": "shv", "mimetype": "application/x-shv", "category": "embroidery", "status": "stable", "reader": ShvReader }) yield ({ "description": "Toyota Embroidery Format", "extension": "10o", "mimetype": "application/x-10o", "category": "embroidery", "status": "stable", "reader": A10oReader }) yield ({ "description": "Toyota Embroidery Format", "extension": "100", "mimetype": "application/x-100", "category": "embroidery", "status": "stable", "reader": A100Reader }) yield ({ "description": "Bits & Volts Embroidery Format", "extension": "bro", "mimetype": "application/x-Bro", "category": "embroidery", "status": "stable", "reader": BroReader }) yield ({ "description": "Sunstar or Barudan Embroidery Format", "extension": "dat", "mimetype": 
"application/x-dat", "category": "embroidery", "status": "stable", "reader": DatReader }) yield ({ "description": "Tajima(Barudan) Embroidery Format", "extension": "dsb", "mimetype": "application/x-dsb", "category": "embroidery", "status": "stable", "reader": DsbReader }) yield ({ "description": "ZSK USA Embroidery Format", "extension": "dsz", "mimetype": "application/x-dsz", "category": "embroidery", "status": "stable", "reader": DszReader }) yield ({ "description": "Elna Embroidery Format", "extension": "emd", "mimetype": "application/x-emd", "category": "embroidery", "status": "stable", "reader": EmdReader }) yield ({ "description": "Eltac Embroidery Format", "extension": "exy", # e??, e01 "mimetype": "application/x-exy", "category": "embroidery", "status": "stable", "reader": ExyReader }) yield ({ "description": "Fortron Embroidery Format", "extension": "fxy", # f??, f01 "mimetype": "application/x-fxy", "category": "embroidery", "status": "stable", "reader": FxyReader }) yield ({ "description": "Gold Thread Embroidery Format", "extension": "gt", "mimetype": "application/x-exy", "category": "embroidery", "status": "stable", "reader": GtReader }) yield ({ "description": "Inbro Embroidery Format", "extension": "inb", "mimetype": "application/x-inb", "category": "embroidery", "status": "stable", "reader": InbReader }) yield ({ "description": "Tajima Embroidery Format", "extension": "tbf", "mimetype": "application/x-tbf", "category": "embroidery", "status": "stable", "reader": TbfReader }) yield ({ "description": "Pfaff Embroidery Format", "extension": "ksm", "mimetype": "application/x-ksm", "category": "embroidery", "status": "stable", "reader": KsmReader }) yield ({ "description": "Happy Embroidery Format", "extension": "tap", "mimetype": "application/x-tap", "category": "embroidery", "status": "stable", "reader": TapReader }) yield ({ "description": "Data Stitch Embroidery Format", "extension": "stx", "mimetype": "application/x-stx", "category": "embroidery", "status": "stable", "reader": StxReader }) yield ({ "description": "Brother Embroidery Format", "extension": "phb", "mimetype": "application/x-phb", "category": "embroidery", "status": "alpha", "reader": PhbReader }) yield ({ "description": "Brother Embroidery Format", "extension": "phc", "mimetype": "application/x-phc", "category": "embroidery", "status": "alpha", "reader": PhcReader }) yield ({ "description": "Ameco Embroidery Format", "extension": "new", "mimetype": "application/x-new", "category": "embroidery", "status": "stable", "reader": NewReader }) yield ({ "description": "Pfaff Embroidery Format", "extension": "max", "mimetype": "application/x-max", "category": "embroidery", "status": "stable", "reader": MaxReader }) yield ({ "description": "Mitsubishi Embroidery Format", "extension": "mit", "mimetype": "application/x-mit", "category": "embroidery", "status": "alpha", "reader": MitReader }) yield ({ "description": "Pfaff Embroidery Format", "extension": "pcd", "mimetype": "application/x-pcd", "category": "embroidery", "status": "stable", "reader": PcdReader }) yield ({ "description": "Pfaff Embroidery Format", "extension": "pcq", "mimetype": "application/x-pcq", "category": "embroidery", "status": "stable", "reader": PcqReader }) yield ({ "description": "Pfaff Embroidery Format", "extension": "pcm", "mimetype": "application/x-pcm", "category": "embroidery", "status": "stable", "reader": PcmReader }) yield ({ "description": "Pfaff Embroidery Format", "extension": "pcs", "mimetype": "application/x-pcs", "category": 
"embroidery", "status": "stable", "reader": PcsReader }) yield ({ "description": "Janome Embroidery Format", "extension": "jpx", "mimetype": "application/x-jpx", "category": "embroidery", "status": "stable", "reader": JpxReader }) yield ({ "description": "Gunold Embroidery Format", "extension": "stc", "mimetype": "application/x-stc", "category": "embroidery", "status": "stable", "reader": StcReader }) # yield ({ # "description": "Zeng Hsing Embroidery Format", # "extension": "zhs", # "mimetype": "application/x-zhs", # "category": "embroidery", # "reader": ZhsReader # }) yield ({ "description": "ZSK TC Embroidery Format", "extension": "zxy", "mimetype": "application/x-zxy", "category": "embroidery", "status": "stable", "reader": ZxyReader }) yield ({ "description": "Brother Stitch Format", "extension": "pmv", "mimetype": "application/x-pmv", "category": "stitch", "status": "stable", "reader": PmvReader, "writer": PmvWriter }) yield ({ "description": "G-code Format", "extension": "txt", "mimetype": "text/g-code", "category": "embroidery", "writer": GcodeWriter, "status": "stable", "options": { "flip_x": (True, False), "flip_y": (True, False), "alternate_z": (True, False), "stitch_z_travel": (int), }, })
274
async def test_setup_without_ipv6(hass, mock_zeroconf):
    """Test without ipv6."""
    with patch.object(hass.config_entries.flow, "async_init"), patch.object(
        zeroconf, "HaServiceBrowser", side_effect=service_update_mock
    ):
        mock_zeroconf.get_service_info.side_effect = get_service_info_mock
        assert await async_setup_component(
            hass, zeroconf.DOMAIN, {zeroconf.DOMAIN: {CONF_IPV6: False}}
        )
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

    assert mock_zeroconf.called_with(ip_version=IPVersion.V4Only)
275
def is_information(status_code, **options):
    """
    gets a value indicating that the given status code is an information code.

    it returns True if the provided status code is from
    `InformationResponseCodeEnum` values.

    :param int status_code: status code to be checked.

    :keyword bool strict_status: specifies that it should only consider
                                 the status code as information if it is from
                                 `InformationResponseCodeEnum` values. otherwise
                                 all codes from `INFORMATION_CODE_MIN` to
                                 `INFORMATION_CODE_MAX` will be considered as
                                 information. defaults to True if not provided.

    :rtype: bool
    """

    return get_component(ResponseStatusPackage.COMPONENT_NAME).is_information(status_code, **options)
276
def create_application_registration(
    onefuzz_instance_name: str, name: str, approle: OnefuzzAppRole, subscription_id: str
) -> Any:
    """Create an application registration"""

    app = get_application(
        display_name=onefuzz_instance_name, subscription_id=subscription_id
    )
    if not app:
        raise Exception("onefuzz app registration not found")

    resource_access = [
        {"id": role["id"], "type": "Scope"}
        for role in app["appRoles"]
        if role["value"] == approle.value
    ]

    params = {
        "isDeviceOnlyAuthSupported": True,
        "displayName": name,
        "publicClient": {
            "redirectUris": ["https://%s.azurewebsites.net" % onefuzz_instance_name]
        },
        "isFallbackPublicClient": True,
        "requiredResourceAccess": (
            [
                {
                    "resourceAccess": resource_access,
                    "resourceAppId": app["appId"],
                }
            ]
            if len(resource_access) > 0
            else []
        ),
    }

    registered_app = query_microsoft_graph(
        method="POST",
        resource="applications",
        body=params,
        subscription=subscription_id,
    )

    logger.info("creating service principal")

    service_principal_params = {
        "accountEnabled": True,
        "appRoleAssignmentRequired": False,
        "servicePrincipalType": "Application",
        "appId": registered_app["appId"],
    }

    def try_sp_create() -> None:
        error: Optional[Exception] = None
        for _ in range(10):
            try:
                query_microsoft_graph(
                    method="POST",
                    resource="servicePrincipals",
                    body=service_principal_params,
                    subscription=subscription_id,
                )
                return
            except GraphQueryError as err:
                # work around timing issue when creating service principal
                # https://github.com/Azure/azure-cli/issues/14767
                if (
                    "service principal being created must in the local tenant"
                    not in str(err)
                ):
                    raise err
            logger.warning(
                "creating service principal failed with an error that occurs "
                "due to AAD race conditions"
            )
            time.sleep(60)
        if error is None:
            raise Exception("service principal creation failed")
        else:
            raise error

    try_sp_create()

    registered_app_id = registered_app["appId"]
    app_id = app["appId"]

    def try_authorize_application(data: Any) -> None:
        authorize_application(
            UUID(registered_app_id),
            UUID(app_id),
            subscription_id=subscription_id,
        )

    retry(try_authorize_application, "authorize application")

    def try_assign_instance_role(data: Any) -> None:
        assign_instance_app_role(onefuzz_instance_name, name, subscription_id, approle)

    retry(try_assign_instance_role, "assign role")

    return registered_app
277
def test_fnirs_channel_naming_and_order_custom_raw():
    """Ensure fNIRS channel checking on manually created data."""
    data = np.random.normal(size=(6, 10))

    # Start with a correctly named raw intensity dataset
    # These are the steps required to build an fNIRS Raw object from scratch
    ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760',
                'S2_D1 850', 'S3_D1 760', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    freqs = np.tile([760, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f

    freqs = np.unique(_channel_frequencies(raw.info))
    picks = _check_channels_ordered(raw.info, freqs)
    assert len(picks) == len(raw.ch_names)
    assert len(picks) == 6

    # Different systems use different frequencies, so ensure that works
    ch_names = ['S1_D1 920', 'S1_D1 850', 'S2_D1 920',
                'S2_D1 850', 'S3_D1 920', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    freqs = np.tile([920, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f

    picks = _check_channels_ordered(raw.info, [920, 850])
    assert len(picks) == len(raw.ch_names)
    assert len(picks) == 6

    # Catch expected errors

    # The frequencies named in the channel names must match the info loc field
    ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760',
                'S2_D1 850', 'S3_D1 760', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    freqs = np.tile([920, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f
    with pytest.raises(ValueError, match='not ordered'):
        _check_channels_ordered(raw.info, [920, 850])

    # Catch if someone doesn't set the info field
    ch_names = ['S1_D1 760', 'S1_D1 850', 'S2_D1 760',
                'S2_D1 850', 'S3_D1 760', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    with pytest.raises(ValueError, match='missing wavelength information'):
        _check_channels_ordered(raw.info, [920, 850])

    # I have seen data encoded not in alternating frequency, but blocked.
    ch_names = ['S1_D1 760', 'S2_D1 760', 'S3_D1 760',
                'S1_D1 850', 'S2_D1 850', 'S3_D1 850']
    ch_types = np.repeat("fnirs_cw_amplitude", 6)
    info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=1.0)
    raw = RawArray(data, info, verbose=True)
    freqs = np.repeat([760, 850], 3)
    for idx, f in enumerate(freqs):
        raw.info["chs"][idx]["loc"][9] = f
    with pytest.raises(ValueError, match='channels not ordered correctly'):
        _check_channels_ordered(raw.info, [760, 850])

    # and this is how you would fix the ordering, then it should pass
    raw.pick(picks=[0, 3, 1, 4, 2, 5])
    _check_channels_ordered(raw.info, [760, 850])
278
def get_all_stocks():
    """Get the list of stock codes that already have data saved as local files."""
    stock_files = os.listdir(PATH_STOCK)
    stocks = [
        file.split('_')[0] for file in stock_files if file.endswith('csv')]
    return stocks
279
def from_mel(
    mel_,
    sr=16000,
    n_fft=2048,
    n_iter=32,
    win_length=1000,
    hop_length=100,
):
    """
    Change a melspectrogram back into a waveform using Librosa.

    Parameters
    ----------
    mel_: np.array
        mel spectrogram to invert

    Returns
    -------
    result: np.array
    """
    return librosa.feature.inverse.mel_to_audio(
        mel_,
        sr=sr,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        window='hann',
        center=True,
        pad_mode='reflect',
        power=1.0,
        n_iter=n_iter,
    )
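# Hedged usage sketch: round-trip a waveform through a mel spectrogram and back.
# Assumes librosa (>= 0.7, for librosa.feature.inverse) and soundfile are installed;
# 'input.wav' is a placeholder path.
import librosa
import soundfile as sf

y, sr = librosa.load('input.wav', sr=16000)
mel = librosa.feature.melspectrogram(
    y=y, sr=sr, n_fft=2048, hop_length=100, win_length=1000, power=1.0)
y_hat = from_mel(mel, sr=sr)            # Griffin-Lim reconstruction, n_iter=32 by default
sf.write('reconstructed.wav', y_hat, sr)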
280
def read(filename, conn):
    """Read data from a file and send it to a pipe."""
    with open(filename, encoding='utf-8') as f:
        conn.send(f.read())
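# Usage sketch: stream a file's contents to the parent process over a Pipe.
# 'example.txt' is a placeholder path.
from multiprocessing import Pipe, Process

parent_conn, child_conn = Pipe()
p = Process(target=read, args=('example.txt', child_conn))
p.start()
text = parent_conn.recv()  # blocks until read() sends the decoded file contents
p.join()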
281
def validate_unique_contests():
    """Should have a unique set of contests for all elections"""
    # Get all election ids
    election_ids = list(Contest.objects.distinct('election_id'))
    for elec_id in election_ids:
        contests = Contest.objects.filter(election_id=elec_id)
        # compare the number of contest records to the unique set of contests for that election
        count = contests.count()
        expected = len(list(contests.distinct('slug')))
        try:
            assert expected == count
        except AssertionError:
            raise AssertionError("%s contests expected for elec_id '%s', but %s found" % (expected, elec_id, count))
    print("PASS: unique contest counts found for all elections")
282
def expand(arg):
    """
    sp.expand currently has no matrix support
    """
    if isinstance(arg, sp.Matrix):
        return arg.applyfunc(sp.expand)
    else:
        return sp.expand(arg)
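# Usage sketch: expand() applied to a sympy Matrix works element-wise, while a
# plain scalar expression falls through to sp.expand unchanged.
import sympy as sp

x, y = sp.symbols('x y')
M = sp.Matrix([[(x + y)**2, x*(x + 1)]])
expand(M)           # Matrix([[x**2 + 2*x*y + y**2, x**2 + x]])
expand((x + y)**2)  # x**2 + 2*x*y + y**2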
283
def validate(obj, schema):
    """
    validates the object against the schema, inserting default values when required
    """
    validator(schema).validate(obj)
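# Hedged usage sketch (not from the original module): `validator` above is
# assumed to be a jsonschema-style validator factory that also fills in defaults,
# as the docstring states; the schema and config below are illustrative only.
schema = {
    "type": "object",
    "properties": {"retries": {"type": "integer", "default": 3}},
}
config = {"retries": "three"}
validate(config, schema)  # expected to raise a validation error for the bad type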
284
def test_table_no_headers(mocker):
    """This situation could never happen as parsed from Markdown. See
    https://stackoverflow.com/a/17543474. However this situation could happen
    manually when using the Table() class directly.
    """
    fake_config = mocker.patch.object(lookatme.widgets.table, "config")
    fake_config.STYLE = {
        "style": "monokai",
        "table": {
            "column_spacing": 3,
            "header_divider": "&",
        },
    }

    headers = None
    aligns = ["left", "center", "right"]
    rows = [
        ["1", "22", "333"],
        ["*1*", "~~22~~", "**333**"],
    ]

    table = lookatme.widgets.table.Table(rows, headers=headers, aligns=aligns)
    canvas = table.render((20,))
    content = list(canvas.content())

    assert len(content) == 2
285
def generate_genome_index(annotation, unstranded_genome_index, stranded_genome_index, chrom_sizes): """ Create an index of the genome annotations and save it in a file. """ # Initializations intervals_dict = {} max_value = -1 prev_chrom = "" i = 0 # Line counter # Write the chromosome lengths as comment lines before the genome index with open(unstranded_genome_index, "w") as unstranded_index_fh, open(stranded_genome_index, "w") as stranded_index_fh: for key, value in chrom_sizes.items(): unstranded_index_fh.write("#%s\t%s\n" % (key, value)) stranded_index_fh.write("#%s\t%s\n" % (key, value)) # Progress bar to track the genome indexes creation nb_lines = sum(1 for _ in open(annotation)) # pbar = progressbar.ProgressBar(widgets=["Indexing the genome ", progressbar.Percentage(), " ", progressbar.Bar(), progressbar.Timer()], maxval=nb_lines).start() # Browsing the GTF file and writing into genome index files with open(annotation, "r") as gtf_fh: for line in gtf_fh: i += 1 # Update the progressbar every 1k lines # if i % 1000 == 1: # pbar.update(i) # Processing lines except comment ones if not line.startswith("#"): # Getting the line info line_split = line.rstrip().split("\t") chrom = line_split[0] cat = line_split[2] start = int(line_split[3]) - 1 stop = int(line_split[4]) strand = line_split[6] antisense = reverse_strand[strand] biotype = line_split[8].split("biotype")[1].split(";")[0].strip('" ') # Registering stored features info in the genome index file(s) if the new line concerns a new chromosome or the new line concerns an annotation not overlapping previously recorded ones if start > max_value or chrom != prev_chrom: # Write the previous features if intervals_dict: register_interval(intervals_dict, prev_chrom, stranded_genome_index, unstranded_genome_index) prev_chrom = chrom # (Re)Initializing the intervals info dict intervals_dict = {strand: {start: {biotype: [cat]}, stop: {}}, antisense: {start: {}, stop: {}}} max_value = stop # Update the dictionary which represents intervals for every distinct annotation else: # Storing the intervals on the strand of the current feature stranded_intervals = intervals_dict[strand] added_info = False # Variable to know if the features info were already added # Browsing the existing boundaries for boundary in sorted(stranded_intervals): # While the GTF line start is after the browsed boundary: keep the browsed boundary features info in case the GTF line start is before the next boundary if boundary < start: stored_feat_strand, stored_feat_antisense = [dict(stranded_intervals[boundary]), dict(intervals_dict[antisense][boundary])] # The GTF line start is already an existing boundary: store the existing features info (to manage after the GTF line stop) and update it with the GTF line features info elif boundary == start: stored_feat_strand, stored_feat_antisense = [dict(stranded_intervals[boundary]), dict(intervals_dict[antisense][boundary])] # Adding the GTF line features info to the interval try: stranded_intervals[boundary][biotype] = stranded_intervals[boundary][biotype] + [cat] except KeyError: # If the GTF line features info regard an unregistered biotype stranded_intervals[boundary][biotype] = [cat] added_info = True # The features info were added # The browsed boundary is after the GTF line start: add the GTF line features info elif boundary > start: # Create a new boundary for the GTF line start if necessary (if it is between 2 existing boundaries, it was not created before) if not added_info: stranded_intervals[start] = 
copy.deepcopy(stored_feat_strand) #stranded_intervals[start][biotype] = [cat] try: stranded_intervals[start][biotype].append(cat) except KeyError: stranded_intervals[start][biotype] = [cat] intervals_dict[antisense][start] = copy.deepcopy(stored_feat_antisense) added_info = True # The features info were added # While the browsed boundary is before the GTF line stop: store the existing features info (to manage after the GTF line stop) and update it with the GTF line features info if boundary < stop: stored_feat_strand, stored_feat_antisense = [dict(stranded_intervals[boundary]), dict(intervals_dict[antisense][boundary])] try: stranded_intervals[boundary][biotype] = stranded_intervals[boundary][biotype] + [cat] except KeyError: stranded_intervals[boundary][biotype] = [cat] # The GTF line stop is already exists, nothing more to do, the GTF line features info are integrated elif boundary == stop: break # The browsed boundary is after the GTF line stop: create a new boundary for the GTF line stop (with the stored features info) else: # boundary > stop stranded_intervals[stop] = copy.deepcopy(stored_feat_strand) intervals_dict[antisense][stop] = copy.deepcopy(stored_feat_antisense) break # The GTF line features info are integrated # If the GTF line stop is after the last boundary, extend the dictionary if stop > max_value: max_value = stop stranded_intervals[stop] = {} intervals_dict[antisense][stop] = {} # Store the categories of the last chromosome register_interval(intervals_dict, chrom, stranded_genome_index, unstranded_genome_index) # pbar.finish()
286
def ElementTreeToDataset(element_tree, namespaces, csv_path, load_all_data):
    """Convert an ElementTree tree model into a DataSet object.

    Args:
      element_tree: ElementTree.ElementTree object containing complete data from
                    DSPL XML file
      namespaces: A list of (namespace_id, namespace_url) tuples
      csv_path: Directory where CSV files associated with dataset can be found
      load_all_data: Boolean indicating whether all CSV data should be loaded

    Returns:
      dspl_model.DataSet object
    """
    dspl_dataset = dspl_model.DataSet()

    # Fill in basic info
    dspl_dataset.namespace = element_tree.getroot().get(
        _DSPL_SCHEMA_PREFIX + 'targetNamespace', default='')

    for namespace_id, namespace_url in namespaces:
        if namespace_id:
            dspl_dataset.AddImport(
                dspl_model.Import(namespace_id=namespace_id,
                                  namespace_url=namespace_url))

    info_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'info')
    if info_element is not None:
        dspl_dataset.name = _GetValue(
            info_element.find(_DSPL_SCHEMA_PREFIX + 'name'))
        dspl_dataset.description = (
            _GetValue(info_element.find(_DSPL_SCHEMA_PREFIX + 'description')))
        dspl_dataset.url = (
            _GetValue(info_element.find(_DSPL_SCHEMA_PREFIX + 'url')))

    provider_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'provider')
    if provider_element is not None:
        dspl_dataset.provider_name = _GetValue(
            provider_element.find(_DSPL_SCHEMA_PREFIX + 'name'))
        dspl_dataset.provider_url = (
            _GetValue(provider_element.find(_DSPL_SCHEMA_PREFIX + 'url')))

    # Get topics
    topics_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'topics')
    if topics_element is not None:
        topic_elements = topics_element.findall(_DSPL_SCHEMA_PREFIX + 'topic')
        for topic_element in topic_elements:
            dspl_dataset.AddTopic(ElementToTopic(topic_element))

    # Get concepts
    concepts_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'concepts')
    if concepts_element is not None:
        concept_elements = concepts_element.findall(_DSPL_SCHEMA_PREFIX + 'concept')
        for concept_element in concept_elements:
            dspl_dataset.AddConcept(ElementToConcept(concept_element))

    # Get slices
    slices_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'slices')
    if slices_element is not None:
        slice_elements = slices_element.findall(_DSPL_SCHEMA_PREFIX + 'slice')
        for slice_element in slice_elements:
            dspl_dataset.AddSlice(ElementToSlice(slice_element, dspl_dataset))

    # Get tables
    tables_element = element_tree.find(_DSPL_SCHEMA_PREFIX + 'tables')
    if tables_element is not None:
        table_elements = tables_element.findall(_DSPL_SCHEMA_PREFIX + 'table')
        for table_element in table_elements:
            dspl_dataset.AddTable(
                ElementToTable(table_element, csv_path, load_all_data))

    return dspl_dataset
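# Hedged usage sketch (not part of the original module): parse a DSPL XML file
# and hand the tree to the converter. The path is a placeholder and the empty
# namespaces list is an assumption; the real caller may collect (id, url) pairs
# while parsing.
import os
import xml.etree.ElementTree as ElementTree

xml_path = "dataset/dspl.xml"
tree = ElementTree.parse(xml_path)
namespaces = []  # list of (namespace_id, namespace_url) tuples
dataset = ElementTreeToDataset(
    tree, namespaces, os.path.dirname(xml_path), load_all_data=True)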
287
def load_config(path):
    """ load the config of LSTMLM """
    if path.rfind('.ckpt') != -1:
        path_name = path[0: path.rfind('.ckpt')]
    else:
        path_name = path

    with open(path_name + '.config', 'rt') as f:
        name = f.readline().split()[0]
        config = wb.Config.load(f)
    return config
288
def box_plot_stats(

    ## arguments / inputs
    x,           ## input array of values
    coef = 1.5   ## positive real number
                 ## (determines how far the whiskers extend from the iqr)

    ):
    """
    calculates box plot five-number summary: the lower whisker extreme, the
    lower ‘hinge’ (observed value), the median, the upper ‘hinge’, and upper
    whisker extreme (observed value)

    returns a results dictionary containing 2 items: "stats" and "xtrms"
    1) the "stats" item contains the box plot five-number summary as an array
    2) the "xtrms" item contains values which lie beyond the box plot extremes

    functions much the same as R's 'boxplot.stats()' function for which this
    Python implementation was predicated

    ref:

    The R Project for Statistical Computing. (2019). Box Plot Statistics.
    http://finzi.psych.upenn.edu/R/library/grDevices/html/boxplot.stats.html.

    Tukey, J. W. (1977). Exploratory Data Analysis. Section 2C.

    McGill, R., Tukey, J.W. and Larsen, W.A. (1978). Variations of Box Plots.
    The American Statistician, 32:12-16. http://dx.doi.org/10.2307/2683468.

    Velleman, P.F. and Hoaglin, D.C. (1981). Applications, Basics and Computing
    of Exploratory Data Analysis. Duxbury Press.

    Emerson, J.D. and Strenio, J. (1983). Boxplots and Batch Comparison.
    Chapter 3 of Understanding Robust and Exploratory Data Analysis,
    eds. D.C. Hoaglin, F. Mosteller and J.W. Tukey. Wiley.

    Chambers, J.M., Cleveland, W.S., Kleiner, B. and Tukey, P.A. (1983).
    Graphical Methods for Data Analysis. Wadsworth & Brooks/Cole.
    """

    ## quality check for coef
    if coef <= 0:
        raise ValueError("cannot proceed: coef must be greater than zero")

    ## convert input to numpy array
    x = np.array(x)

    ## determine median, lower ‘hinge’, upper ‘hinge’
    median = np.quantile(a = x, q = 0.50, interpolation = "midpoint")
    first_quart = np.quantile(a = x, q = 0.25, interpolation = "midpoint")
    third_quart = np.quantile(a = x, q = 0.75, interpolation = "midpoint")

    ## calculate inter quartile range
    intr_quart_rng = third_quart - first_quart

    ## calculate extreme of the lower whisker (observed, not interpolated)
    lower = first_quart - (coef * intr_quart_rng)
    lower_whisk = np.compress(x >= lower, x)
    lower_whisk_obs = np.min(lower_whisk)

    ## calculate extreme of the upper whisker (observed, not interpolated)
    upper = third_quart + (coef * intr_quart_rng)
    upper_whisk = np.compress(x <= upper, x)
    upper_whisk_obs = np.max(upper_whisk)

    ## store box plot results dictionary
    boxplot_stats = {}
    boxplot_stats["stats"] = np.array([lower_whisk_obs,
                                       first_quart,
                                       median,
                                       third_quart,
                                       upper_whisk_obs])

    ## store observations beyond the box plot extremes
    boxplot_stats["xtrms"] = np.array(x[(x < lower_whisk_obs) |
                                        (x > upper_whisk_obs)])

    ## return dictionary
    return boxplot_stats
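# Usage sketch for box_plot_stats(): five-number summary and flagged outliers
# for a small sample (values below worked out by hand with midpoint quantiles).
import numpy as np

sample = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 40])
result = box_plot_stats(sample)
result["stats"]  # array([1. , 3.5, 5.5, 7.5, 9. ]): lower whisker, Q1, median, Q3, upper whisker
result["xtrms"]  # array([40]): lies beyond Q3 + 1.5 * IQR = 13.5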
289
def handle_echo(reader, writer):
    """Accept commands from PureData and forward them to the bulb.

    netcat -l 8081
    nc localhost 8081
    nc dahomey.local 8081
    """
    # Prevent queues that back up for minutes
    global tiefe
    if tiefe >= MAX_PUFFER:
        if args.verbose:
            print("{} {} !PD: >MAX_PUFFER".format(now(), tiefe + 1))
        # Give up, but cancel all pending requests first
        for task in asyncio.Task.all_tasks():
            task.cancel()
        writer.close()
        return
    tiefe += 1
    try:
        # As a coroutine, no @profile(echo_times) is possible, since it formally returns immediately
        if args.profile:
            beginrequest = time.time()
        (request_ip, _) = writer.get_extra_info('peername')

        # Step 1: handle and finish the request from pd
        data = yield from reader.read(255)
        puredata = data.decode().strip()
        if args.verbose:
            print("{} {} <PD: {}".format(now(), tiefe, repr(puredata)))

        # issue #1 race condition: >1 netsend commands get joined when they pile up
        # causes 'socket.send() raised exception.' when pd is not listening any more
        messages = filter(None, puredata.split(';'))
        for msg in messages:
            peer_ip = None
            command = None
            bulb, *cmdarg = msg.strip().split()
            if not cmdarg and bulb == 'peer_ip':
                # Hook: report the iOS client's IP address back
                peer_ip = request_ip
                if args.verbose:
                    print("Client: {0}".format(peer_ip))
            elif cmdarg and (len(cmdarg) == 2):  # ignore ill-formed cmd arg
                cmd, arg = cmdarg
                x = int(float(arg))  # pd sliders are 0..1
                command = commands[cmd](bulb, x)

            # issue #1: don't wait for a reply in pd
            yield from writer.drain()
            writer.close()

            if peer_ip:
                # Report the peer IP synchronously to the server when the request comes from the iOS client
                peer_ip_fudi = "peer_ip {0};".format(peer_ip)  # FUDI
                if args.verbose:
                    print(">{0}:{1} {2}".format(HOST_PD, PORT_PD, peer_ip_fudi))
                try:
                    netsend_socket(HOST_PD, PORT_PD_PEER, peer_ip_fudi)
                    netsend_socket(HOST_PD, PORT_PD_PEER, "localcontrol 1;")
                    netsend_socket(HOST_PD, PORT_PD_PEER, "localgui 0;")
                    netsend_socket(peer_ip, PORT_PD_PEER, "localcontrol 0;")
                    netsend_socket(peer_ip, PORT_PD_PEER, "localgui 1;")
                except ConnectionRefusedError:
                    if args.verbose:
                        print("No local PureData on port {0}".format(PORT_PD))
                    netsend_socket(peer_ip, PORT_PD_PEER, "localcontrol 1;")
                    netsend_socket(peer_ip, PORT_PD_PEER, "localgui 1;")
            else:
                # Step 2: send the request to the (possibly slow) bulb in a separate thread
                if args.verbose:
                    print("{} {} >Bulb: {}".format(now(), tiefe, command))
                reply = yield from loop.run_in_executor(None, post, bulb, command)
                if args.verbose:
                    print("{} {} <Bulb: {}".format(now(), tiefe, reply))
                # Send the response with netsend to the netreceive in pd
                yield from loop.run_in_executor(None, netsend, request_ip, bulb, reply)

        if args.profile:
            endresponse = time.time()
            echo_times.append(endresponse - beginrequest)
    finally:
        tiefe -= 1
290
def test_ms_infer_for_func():
    """ test_ms_infer_for_func """
    ms_infer_for_func(1.0)
291
def expand_multinomial(expr, deep=True):
    """
    Wrapper around expand that only uses the multinomial hint.  See the expand
    docstring for more information.

    Examples
    ========

    >>> from sympy import symbols, expand_multinomial, exp
    >>> x, y = symbols('x y', positive=True)
    >>> expand_multinomial((x + exp(x + 1))**2)
    x**2 + 2*x*exp(x + 1) + exp(2*x + 2)

    """
    return sympify(expr).expand(deep=deep, mul=False, power_exp=False,
                                power_base=False, basic=False,
                                multinomial=True, log=False)
292
def unpickle_tokens(filepath):
    """Unpickle the tokens into memory."""
    try:
        with open(filepath + '_tokens.pickle', 'rb') as f:
            tokens = pickle.load(f)
    except FileNotFoundError:
        tokens = tokenize_and_tag(filepath)
        pickle_tokens(tokens, filepath)

    return tokens
293
def make_cov(df, columns=["parallax", "pmra", "pmdec"]):
    """Generate covariance matrices from Gaia data.

    columns : list
        list of columns to calculate covariance.
        Must be a subset of 'ra', 'dec', 'parallax', 'pmra', 'pmdec'.

    Returns
    -------
    numpy.array
        (N, n, n) array of covariance matrices, where n is the number of
        columns; squeezed when N == 1.
    """
    gaia_order = ["ra", "dec", "parallax", "pmra", "pmdec"]
    N = len(np.atleast_1d(df[columns[0] + "_error"]))  # N could be 1
    n = len(columns)
    C = np.zeros([N, n, n])

    for i, j in zip(*np.triu_indices(n)):
        if i == j:
            C[:, [i], [j]] = np.atleast_1d(
                df[f"{columns[i]}_error"] * df[f"{columns[j]}_error"]
            )[:, None]
        else:
            corr_name = (
                "_".join(
                    sorted([columns[i], columns[j]], key=lambda x: gaia_order.index(x))
                )
                + "_corr"
            )
            C[:, [i, j], [j, i]] = np.atleast_1d(
                df[f"{columns[i]}_error"] * df[f"{columns[j]}_error"] * df[corr_name]
            )[:, None]

    return C.squeeze()
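# Hedged usage sketch: per-source covariance matrices from Gaia-style columns.
# The single-row DataFrame below is made up; column names follow the Gaia
# archive convention that make_cov() assumes.
import pandas as pd

df = pd.DataFrame({
    "parallax_error": [0.1], "pmra_error": [0.2], "pmdec_error": [0.3],
    "parallax_pmra_corr": [0.5], "parallax_pmdec_corr": [0.0], "pmra_pmdec_corr": [-0.2],
})
C = make_cov(df)  # squeezed to shape (3, 3) because N == 1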
294
def kinetics(request, section='', subsection=''): """ The RMG database homepage. """ # Make sure section has an allowed value if section not in ['libraries', 'families', '']: raise Http404 # Load the kinetics database, if necessary database.load('kinetics', section) # Determine which subsection we wish to view db = None try: db = database.get_kinetics_database(section, subsection) except ValueError: pass if db is not None: # A subsection was specified, so render a table of the entries in # that part of the database is_group_database = False # Sort entries by index if db.top is not None and len(db.top) > 0: # If there is a tree in this database, only consider the entries # that are in the tree entries0 = getDatabaseTreeAsList(db, db.top) tree = '<ul class="kineticsTree">\n{0}\n</ul>\n'.format(getKineticsTreeHTML(db, section, subsection, db.top)) else: # If there is not a tree, consider all entries entries0 = list(db.entries.values()) if any(isinstance(item, list) for item in entries0): # if the entries are lists entries0 = reduce(lambda x, y: x+y, entries0) # Sort the entries by index and label entries0.sort(key=lambda entry: (entry.index, entry.label)) tree = '' entries = [] for entry0 in entries0: if isinstance(entry0.data, str): data_format = 'Link' else: data_format = entry0.data.__class__.__name__ entry = { 'index': entry0.index, 'label': entry0.label, 'dataFormat': data_format, } if isinstance(db, KineticsGroups): is_group_database = True entry['structure'] = getStructureInfo(entry0.item) entry['parent'] = entry0.parent entry['children'] = entry0.children elif 'rules' in subsection: if isinstance(entry0.item, list): # if the reactants are not group objects, then this rate rule came from # the averaging step, and we don't want to show all of the averaged nodes # in the web view. We only want to show nodes with direct values or # training rates that became rate rules. continue else: entry['reactants'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.reactants]) entry['products'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.products]) entry['arrow'] = '&hArr;' if entry0.item.reversible else '&rarr;' else: entry['reactants'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.reactants]) entry['products'] = ' + '.join([getStructureInfo(reactant) for reactant in entry0.item.products]) entry['arrow'] = '&hArr;' if entry0.item.reversible else '&rarr;' entries.append(entry) return render(request, 'kineticsTable.html', {'section': section, 'subsection': subsection, 'databaseName': db.name, 'databaseDesc': db.long_desc, 'entries': entries, 'tree': tree, 'isGroupDatabase': is_group_database}) else: # No subsection was specified, so render an outline of the kinetics # database components kinetics_libraries = [(label, library) for label, library in database.kinetics.libraries.items() if subsection in label] kinetics_libraries.sort() # If this is a subsection, but not the main kinetics page, # we don't need to iterate through the entire database, as this takes a long time to load. 
try: families_to_process = [database.kinetics.families[subsection]] except KeyError: # if main kinetics page, or some other error families_to_process = database.kinetics.families.values() for family in families_to_process: for i in range(0, len(family.depositories)): if 'untrained' in family.depositories[i].name: family.depositories.pop(i) family.depositories.append(getUntrainedReactions(family)) kinetics_families = [(label, family) for label, family in database.kinetics.families.items() if subsection in label] kinetics_families.sort() return render(request, 'kinetics.html', {'section': section, 'subsection': subsection, 'kineticsLibraries': kinetics_libraries, 'kineticsFamilies': kinetics_families})
295
def test_safe_std():
    """Checks that the calculated standard deviation is correct."""
    array = np.array((1, 2, 3))
    calc_std = _weighting._safe_std(array)
    assert_allclose(calc_std, np.std(array))
296
def cot_to_cot(craft: dict, known_craft: dict = {}) -> str:
    """
    Given an input CoT XML Event with an ICAO Hex as the UID, will transform the
    Event's name, callsign & CoT Event Type based on known craft input database
    (CSV file).
    """
    return xml.etree.ElementTree.tostring(cot_to_cot_xml(craft, known_craft))
297
def update_export(module, export, filesystem, system):
    """Update an existing export's client permissions."""
    assert export
    changed = False

    name = module.params['name']

    client_list = module.params['client_list']
    if client_list:
        if set(map(transform, unmunchify(export.get_permissions()))) \
                != set(map(transform, client_list)):
            if not module.check_mode:
                export.update_permissions(client_list)
            changed = True

    return changed
298
def create_kdf(kdf_type: str) -> typing.Type[KDF]:
    """Returns the class corresponding to the given key derivation function
    type name.

    Args:
        kdf_type
            The name of the OpenSSH private key key derivation function type.

    Returns:
        The subclass of :py:class:`KDF` corresponding to the key derivation
        function type name.

    Raises:
        KeyError: There is no subclass of :py:class:`KDF` corresponding to
            the given key derivation function type name.
    """
    return _KDF_MAPPING[kdf_type]
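# Hedged usage sketch: the "bcrypt" key below is an assumption about what the
# module registers in _KDF_MAPPING; only create_kdf() itself is taken from above.
kdf_cls = create_kdf("bcrypt")   # the KDF subclass registered under that name
try:
    create_kdf("unknown-kdf")
except KeyError:
    pass  # unrecognized names raise KeyError, as documented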
299